table 16 arch/alpha/include/asm/agp.h #define free_gatt_pages(table, order) \
table 17 arch/alpha/include/asm/agp.h free_pages((unsigned long)(table), (order))
table 158 arch/arc/kernel/unwind.c struct unwind_table *table;
table 160 arch/arc/kernel/unwind.c for (table = &root_table; table; table = table->link)
table 161 arch/arc/kernel/unwind.c if ((pc >= table->core.pc
table 162 arch/arc/kernel/unwind.c && pc < table->core.pc + table->core.range)
table 163 arch/arc/kernel/unwind.c || (pc >= table->init.pc
table 164 arch/arc/kernel/unwind.c && pc < table->init.pc + table->init.range))
table 167 arch/arc/kernel/unwind.c return table;
table 172 arch/arc/kernel/unwind.c static void init_unwind_hdr(struct unwind_table *table,
table 184 arch/arc/kernel/unwind.c static void init_unwind_table(struct unwind_table *table, const char *name,
table 193 arch/arc/kernel/unwind.c table->core.pc = (unsigned long)core_start;
table 194 arch/arc/kernel/unwind.c table->core.range = core_size;
table 195 arch/arc/kernel/unwind.c table->init.pc = (unsigned long)init_start;
table 196 arch/arc/kernel/unwind.c table->init.range = init_size;
table 197 arch/arc/kernel/unwind.c table->address = table_start;
table 198 arch/arc/kernel/unwind.c table->size = table_size;
table 209 arch/arc/kernel/unwind.c table->hdrsz = header_size;
table 211 arch/arc/kernel/unwind.c table->header = header_start;
table 212 arch/arc/kernel/unwind.c table->link = NULL;
table 213 arch/arc/kernel/unwind.c table->name = name;
table 257 arch/arc/kernel/unwind.c static void init_unwind_hdr(struct unwind_table *table,
table 261 arch/arc/kernel/unwind.c unsigned long tableSize = table->size, hdrSize;
table 271 arch/arc/kernel/unwind.c struct eh_frame_hdr_table_entry table[];
table 274 arch/arc/kernel/unwind.c if (table->header)
table 277 arch/arc/kernel/unwind.c if (table->hdrsz)
table 279 arch/arc/kernel/unwind.c table->name);
table 284 arch/arc/kernel/unwind.c for (fde = table->address, n = 0;
table 287 arch/arc/kernel/unwind.c const u32 *cie = cie_for_fde(fde, table);
table 325 arch/arc/kernel/unwind.c put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
table 330 arch/arc/kernel/unwind.c BUILD_BUG_ON(offsetof(typeof(*header), table)
table 331 arch/arc/kernel/unwind.c % __alignof(typeof(*header->table)));
table 332 arch/arc/kernel/unwind.c for (fde = table->address, tableSize = table->size, n = 0;
table 340 arch/arc/kernel/unwind.c header->table[n].start = read_pointer(&ptr,
table 344 arch/arc/kernel/unwind.c header->table[n].fde = (unsigned long)fde;
table 349 arch/arc/kernel/unwind.c sort(header->table,
table 351 arch/arc/kernel/unwind.c sizeof(*header->table),
table 354 arch/arc/kernel/unwind.c table->hdrsz = hdrSize;
table 356 arch/arc/kernel/unwind.c table->header = (const void *)header;
table 375 arch/arc/kernel/unwind.c struct unwind_table *table;
table 380 arch/arc/kernel/unwind.c table = kmalloc(sizeof(*table), GFP_KERNEL);
table 381 arch/arc/kernel/unwind.c if (!table)
table 384 arch/arc/kernel/unwind.c init_unwind_table(table, module->name,
table 390 arch/arc/kernel/unwind.c init_unwind_hdr(table, unw_hdr_alloc);
table 394 arch/arc/kernel/unwind.c module->name, table->core.pc, table->core.range);
table 397 arch/arc/kernel/unwind.c last_table->link = table;
table 399 arch/arc/kernel/unwind.c root_table.link = table;
table 400 arch/arc/kernel/unwind.c last_table = table;
table 402 arch/arc/kernel/unwind.c return table;
table 406 arch/arc/kernel/unwind.c struct unwind_table *table;
table 413 arch/arc/kernel/unwind.c struct unwind_table *table = info->table, *prev;
table 415 arch/arc/kernel/unwind.c for (prev = &root_table; prev->link && prev->link != table;
table 421 arch/arc/kernel/unwind.c table->init.pc = 0;
table 422 arch/arc/kernel/unwind.c table->init.range = 0;
table 423 arch/arc/kernel/unwind.c info->table = NULL;
table 425 arch/arc/kernel/unwind.c prev->link = table->link;
table 430 arch/arc/kernel/unwind.c info->table = NULL;
table 438 arch/arc/kernel/unwind.c struct unwind_table *table = handle;
table 441 arch/arc/kernel/unwind.c if (!table || table == &root_table)
table 444 arch/arc/kernel/unwind.c if (init_only && table == last_table) {
table 445 arch/arc/kernel/unwind.c table->init.pc = 0;
table 446 arch/arc/kernel/unwind.c table->init.range = 0;
table 450 arch/arc/kernel/unwind.c info.table = table;
table 454 arch/arc/kernel/unwind.c kfree(table->header);
table 455 arch/arc/kernel/unwind.c kfree(table);
table 513 arch/arc/kernel/unwind.c static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
table 909 arch/arc/kernel/unwind.c const struct unwind_table *table;
table 932 arch/arc/kernel/unwind.c table = find_table(pc);
table 933 arch/arc/kernel/unwind.c if (table != NULL
table 934 arch/arc/kernel/unwind.c && !(table->size & (sizeof(*fde) - 1))) {
table 935 arch/arc/kernel/unwind.c const u8 *hdr = table->header;
table 958 arch/arc/kernel/unwind.c end = hdr + table->hdrsz;
table 960 arch/arc/kernel/unwind.c == (unsigned long)table->address
table 991 arch/arc/kernel/unwind.c cie = cie_for_fde(fde, table);
table 58 arch/arm/include/asm/efi.h #define efi_table_attr(table, attr, instance) \
table 59 arch/arm/include/asm/efi.h ((table##_t *)instance)->attr
table 14 arch/arm/include/asm/kvm_coproc.h const struct coproc_reg *table;
table 17 arch/arm/include/asm/kvm_coproc.h void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
table 189 arch/arm/kernel/unwind.c struct unwind_table *table;
table 192 arch/arm/kernel/unwind.c list_for_each_entry(table, &unwind_tables, list) {
table 193 arch/arm/kernel/unwind.c if (addr >= table->begin_addr &&
table 194 arch/arm/kernel/unwind.c addr < table->end_addr) {
table 195 arch/arm/kernel/unwind.c idx = search_index(addr, table->start,
table 196 arch/arm/kernel/unwind.c table->origin,
table 197 arch/arm/kernel/unwind.c table->stop);
table 199 arch/arm/kernel/unwind.c list_move(&table->list, &unwind_tables);
table 511 arch/arm/kvm/coproc.c static int check_reg_table(const struct coproc_reg *table, unsigned int n)
table 516 arch/arm/kvm/coproc.c if (cmp_reg(&table[i-1], &table[i]) >= 0) {
table 517 arch/arm/kvm/coproc.c kvm_err("reg table %p out of order (%d)\n", table, i - 1);
table 528 arch/arm/kvm/coproc.c void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
table 530 arch/arm/kvm/coproc.c BUG_ON(check_reg_table(table->table, table->num));
table 531 arch/arm/kvm/coproc.c target_tables[table->target] = table;
table 537 arch/arm/kvm/coproc.c struct kvm_coproc_target_table *table;
table 539 arch/arm/kvm/coproc.c table = target_tables[target];
table 540 arch/arm/kvm/coproc.c *num = table->num;
table 541 arch/arm/kvm/coproc.c return table->table;
table 564 arch/arm/kvm/coproc.c const struct coproc_reg table[],
table 569 arch/arm/kvm/coproc.c return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
table 576 arch/arm/kvm/coproc.c const struct coproc_reg *table, *r;
table 581 arch/arm/kvm/coproc.c table = get_target_table(vcpu->arch.target, &num);
table 584 arch/arm/kvm/coproc.c r = find_reg(params, table, num);
table 654 arch/arm/kvm/coproc.c const struct coproc_reg *table, size_t num,
table 660 arch/arm/kvm/coproc.c if (table[i].reset) {
table 661 arch/arm/kvm/coproc.c int reg = table[i].reg;
table 663 arch/arm/kvm/coproc.c table[i].reset(vcpu, &table[i]);
table 666 arch/arm/kvm/coproc.c if (table[i].is_64bit)
table 770 arch/arm/kvm/coproc.c const struct coproc_reg *table, *r;
table 780 arch/arm/kvm/coproc.c table = get_target_table(vcpu->arch.target, &num);
table 781 arch/arm/kvm/coproc.c r = find_reg(&params, table, num);
table 1443 arch/arm/kvm/coproc.c const struct coproc_reg *table;
table 1449 arch/arm/kvm/coproc.c table = get_target_table(vcpu->arch.target, &num);
table 1450 arch/arm/kvm/coproc.c reset_coproc_regs(vcpu, table, num, bmap);
table 30 arch/arm/kvm/coproc_a15.c .table = a15_regs,
table 33 arch/arm/kvm/coproc_a7.c .table = a7_regs,
table 85 arch/arm/mach-davinci/board-da830-evm.c .table = {
table 93 arch/arm/mach-davinci/board-da830-evm.c .table = {
table 200 arch/arm/mach-davinci/board-da830-evm.c .table = {
table 651 arch/arm/mach-davinci/board-da850-evm.c .table = {
table 783 arch/arm/mach-davinci/board-da850-evm.c .table = {
table 813 arch/arm/mach-davinci/board-da850-evm.c .table = {
table 853 arch/arm/mach-davinci/board-da850-evm.c .table = {
table 119 arch/arm/mach-davinci/board-dm355-evm.c .table = {
table 661 arch/arm/mach-davinci/board-dm644x-evm.c .table = {
table 136 arch/arm/mach-davinci/board-omapl138-hawk.c .table = {
table 330 arch/arm/mach-davinci/board-omapl138-hawk.c .table = {
table 338 arch/arm/mach-davinci/board-omapl138-hawk.c .table = {
table 334 arch/arm/mach-ep93xx/core.c .table = {
table 107 arch/arm/mach-ep93xx/edb93xx.c .table = {
table 48 arch/arm/mach-ep93xx/simone.c .table = {
table 77 arch/arm/mach-ep93xx/simone.c .table = {
table 271 arch/arm/mach-ep93xx/ts72xx.c .table = {
table 320 arch/arm/mach-ep93xx/ts72xx.c .table = {
table 207 arch/arm/mach-ep93xx/vision_ep9307.c .table = {
table 246 arch/arm/mach-ep93xx/vision_ep9307.c .table = {
table 212 arch/arm/mach-imx/mach-mx21ads.c .table = {
table 230 arch/arm/mach-imx/mach-mx27ads.c .table = {
table 340 arch/arm/mach-imx/mach-pcm043.c .table = {
table 390 arch/arm/mach-integrator/impd1.c struct_size(lookup, table, 3),
table 413 arch/arm/mach-integrator/impd1.c lookup->table[0].chip_label = chipname;
table 414 arch/arm/mach-integrator/impd1.c lookup->table[0].chip_hwnum = 3;
table 415 arch/arm/mach-integrator/impd1.c lookup->table[0].con_id = "wp";
table 416 arch/arm/mach-integrator/impd1.c lookup->table[1].chip_label = chipname;
table 417 arch/arm/mach-integrator/impd1.c lookup->table[1].chip_hwnum = 4;
table 418 arch/arm/mach-integrator/impd1.c lookup->table[1].con_id = "cd";
table 419 arch/arm/mach-integrator/impd1.c lookup->table[1].flags = GPIO_ACTIVE_LOW;
table 38 arch/arm/mach-iop32x/i2c.c .table = {
table 47 arch/arm/mach-iop32x/i2c.c .table = {
table 57 arch/arm/mach-ixp4xx/avila-setup.c .table = {
table 76 arch/arm/mach-ixp4xx/dsmg600-setup.c .table = {
table 62 arch/arm/mach-ixp4xx/fsg-setup.c .table = {
table 130 arch/arm/mach-ixp4xx/ixdp425-setup.c .table = {
table 108 arch/arm/mach-ixp4xx/nas100d-setup.c .table = {
table 76 arch/arm/mach-ixp4xx/nslu2-setup.c .table = {
table 170 arch/arm/mach-ixp4xx/vulcan-setup.c .table = {
table 163 arch/arm/mach-mmp/brownstone.c .table = {
table 284 arch/arm/mach-omap1/board-ams-delta.c .table = {
table 306 arch/arm/mach-omap1/board-ams-delta.c .table = {
table 363 arch/arm/mach-omap1/board-ams-delta.c .table = {
table 406 arch/arm/mach-omap1/board-ams-delta.c .table = {
table 474 arch/arm/mach-omap1/board-ams-delta.c .table = {
table 551 arch/arm/mach-omap1/board-ams-delta.c .table = {
table 219 arch/arm/mach-omap1/board-nokia770.c .table = {
table 179 arch/arm/mach-pxa/cm-x255.c .table = {
table 293 arch/arm/mach-pxa/cm-x270.c .table = {
table 357 arch/arm/mach-pxa/cm-x300.c .table = {
table 463 arch/arm/mach-pxa/cm-x300.c .table = {
table 42 arch/arm/mach-pxa/colibri-evalboard.c .table = {
table 51 arch/arm/mach-pxa/colibri-evalboard.c .table = {
table 60 arch/arm/mach-pxa/colibri-evalboard.c .table = {
table 56 arch/arm/mach-pxa/colibri-pxa270-income.c .table = {
table 497 arch/arm/mach-pxa/corgi.c .table = {
table 132 arch/arm/mach-pxa/csb726.c .table = {
table 549 arch/arm/mach-pxa/em-x270.c .table = {
table 726 arch/arm/mach-pxa/ezx.c .table = {
table 734 arch/arm/mach-pxa/hx4700.c .table = {
table 280 arch/arm/mach-pxa/littleton.c .table = {
table 138 arch/arm/mach-pxa/lubbock.c .table = {
table 677 arch/arm/mach-pxa/magician.c .table = {
table 799 arch/arm/mach-pxa/magician.c .table = {
table 521 arch/arm/mach-pxa/mainstone.c .table = {
table 391 arch/arm/mach-pxa/mioa701.c .table = {
table 330 arch/arm/mach-pxa/mxm8x10.c .table = {
table 289 arch/arm/mach-pxa/palmld.c .table = {
table 333 arch/arm/mach-pxa/palmld.c .table = {
table 183 arch/arm/mach-pxa/palmt5.c .table = {
table 125 arch/arm/mach-pxa/palmtc.c .table = {
table 105 arch/arm/mach-pxa/palmte2.c .table = {
table 481 arch/arm/mach-pxa/palmtreo.c .table = {
table 505 arch/arm/mach-pxa/palmtreo.c .table = {
table 338 arch/arm/mach-pxa/palmtx.c .table = {
table 322 arch/arm/mach-pxa/palmz72.c .table = {
table 387 arch/arm/mach-pxa/palmz72.c .table = {
table 293 arch/arm/mach-pxa/poodle.c .table = {
table 619 arch/arm/mach-pxa/spitz.c .table = {
table 139 arch/arm/mach-pxa/stargate2.c .table = {
table 295 arch/arm/mach-pxa/tosa.c .table = {
table 461 arch/arm/mach-pxa/viper.c .table = {
table 790 arch/arm/mach-pxa/viper.c .table = {
table 245 arch/arm/mach-pxa/vpac270.c .table = {
table 296 arch/arm/mach-pxa/z2.c .table = {
table 424 arch/arm/mach-pxa/zeus.c .table = {
table 561 arch/arm/mach-pxa/zeus.c .table = {
table 667 arch/arm/mach-pxa/zeus.c .table = {
table 238 arch/arm/mach-pxa/zylonite.c .table = {
table 254 arch/arm/mach-pxa/zylonite.c .table = {
table 270 arch/arm/mach-pxa/zylonite.c .table = {
table 144 arch/arm/mach-s3c24xx/mach-at2440evb.c .table = {
table 469 arch/arm/mach-s3c24xx/mach-h1940.c .table = {
table 403 arch/arm/mach-s3c24xx/mach-jive.c .table = {
table 428 arch/arm/mach-s3c24xx/mach-jive.c .table = {
table 245 arch/arm/mach-s3c24xx/mach-mini2440.c .table = {
table 360 arch/arm/mach-s3c24xx/mach-n30.c .table = {
table 209 arch/arm/mach-s3c24xx/mach-qt2410.c .table = {
table 568 arch/arm/mach-s3c24xx/mach-rx1950.c .table = {
table 212 arch/arm/mach-s3c64xx/mach-crag6410-module.c .table = {
table 252 arch/arm/mach-s3c64xx/mach-crag6410-module.c .table = {
table 285 arch/arm/mach-s3c64xx/mach-crag6410-module.c .table = {
table 611 arch/arm/mach-s3c64xx/mach-crag6410.c .table = {
table 220 arch/arm/mach-s3c64xx/mach-smartq.c .table = {
table 395 arch/arm/mach-s3c64xx/mach-smartq.c .table = {
table 451 arch/arm/mach-sa1100/assabet.c .table = {
table 473 arch/arm/mach-sa1100/assabet.c .table = {
table 524 arch/arm/mach-sa1100/assabet.c .table = {
table 536 arch/arm/mach-sa1100/assabet.c .table = {
table 49 arch/arm/mach-sa1100/cerf.c .table = {
table 235 arch/arm/mach-sa1100/generic.c void sa11x0_register_pcmcia(int socket, struct gpiod_lookup_table *table)
table 237 arch/arm/mach-sa1100/generic.c if (table)
table 238 arch/arm/mach-sa1100/generic.c gpiod_add_lookup_table(table);
table 124 arch/arm/mach-sa1100/h3xxx.c .table = {
table 223 arch/arm/mach-sa1100/h3xxx.c .table = {
table 191 arch/arm/mach-sa1100/jornada720.c .table = {
table 241 arch/arm/mach-sa1100/jornada720.c .table = {
table 101 arch/arm/mach-sa1100/nanoengine.c .table = {
table 111 arch/arm/mach-sa1100/nanoengine.c .table = {
table 86 arch/arm/mach-sa1100/neponset.c .table = {
table 98 arch/arm/mach-sa1100/neponset.c .table = {
table 110 arch/arm/mach-sa1100/neponset.c .table = {
table 80 arch/arm/mach-sa1100/shannon.c .table = {
table 89 arch/arm/mach-sa1100/shannon.c .table = {
table 330 arch/arm/mach-sa1100/simpad.c .table = {
table 370 arch/arm/mach-sa1100/simpad.c .table = {
table 30 arch/arm/mach-tegra/board-paz00.c .table = {
table 268 arch/arm/plat-samsung/include/plat/cpu-freq-core.h static inline int s3c_cpufreq_addfreq(struct cpufreq_frequency_table *table,
table 275 arch/arm/plat-samsung/include/plat/cpu-freq-core.h if (table) {
table 282 arch/arm/plat-samsung/include/plat/cpu-freq-core.h table[index].driver_data = index;
table 283 arch/arm/plat-samsung/include/plat/cpu-freq-core.h table[index].frequency = freq;
table 412 arch/arm/probes/decode.c const union decode_item *table, bool thumb,
table 416 arch/arm/probes/decode.c const struct decode_header *h = (struct decode_header *)table;
table 465 arch/arm/probes/decode.c next = (struct decode_header *)d->table.table;
table 296 arch/arm/probes/decode.h const union decode_item *table;
table 341 arch/arm/probes/decode.h {.table = (_table)}
table 22 arch/arm/probes/kprobes/checkers-arm.c static const union decode_item table[] = {
table 83 arch/arm/probes/kprobes/checkers-arm.c return probes_decode_insn(insn, asi, table, false, false, stack_check_actions, NULL);
table 21 arch/arm/probes/kprobes/checkers-thumb.c static const union decode_item table[] = {
table 75 arch/arm/probes/kprobes/checkers-thumb.c return probes_decode_insn(insn, asi, table, false, false, stack_check_actions, NULL);
table 596 arch/arm/probes/kprobes/test-core.c static int table_iter(const union decode_item *table,
table 600 arch/arm/probes/kprobes/test-core.c const struct decode_header *h = (struct decode_header *)table;
table 652 arch/arm/probes/kprobes/test-core.c return table_iter(d->table.table, table_test_fn, &args2);
table 658 arch/arm/probes/kprobes/test-core.c static int table_test(const union decode_item *table)
table 661 arch/arm/probes/kprobes/test-core.c .root_table = table,
table 756 arch/arm/probes/kprobes/test-core.c ret = table_iter(d->table.table, coverage_start_fn, coverage);
table 764 arch/arm/probes/kprobes/test-core.c static int coverage_start(const union decode_item *table)
table 771 arch/arm/probes/kprobes/test-core.c return table_iter(table, coverage_start_fn, &coverage);
table 1551 arch/arm/probes/kprobes/test-core.c static int run_test_cases(void (*tests)(void), const union decode_item *table)
table 1556 arch/arm/probes/kprobes/test-core.c ret = table_test(table);
table 1561 arch/arm/probes/kprobes/test-core.c ret = coverage_start(table);
table 101 arch/arm64/include/asm/efi.h #define efi_table_attr(table, attr, instance) \
table 102 arch/arm64/include/asm/efi.h ((table##_t *)instance)->attr
table 18 arch/arm64/include/asm/kvm_coproc.h const struct sys_reg_desc *table;
table 28 arch/arm64/include/asm/kvm_coproc.h struct kvm_sys_reg_target_table *table);
table 128 arch/arm64/kernel/acpi.c struct acpi_table_header *table;
table 137 arch/arm64/kernel/acpi.c status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
table 145 arch/arm64/kernel/acpi.c fadt = (struct acpi_table_fadt *)table;
table 153 arch/arm64/kernel/acpi.c if (table->revision < 5 ||
table 154 arch/arm64/kernel/acpi.c (table->revision == 5 && fadt->minor_revision < 1)) {
table 156 arch/arm64/kernel/acpi.c table->revision, fadt->minor_revision);
table 175 arch/arm64/kernel/acpi.c acpi_put_table(table);
table 205 arch/arm64/kernel/armv8_deprecated.c static int emulation_proc_handler(struct ctl_table *table, int write,
table 210 arch/arm64/kernel/armv8_deprecated.c struct insn_emulation *insn = (struct insn_emulation *) table->data;
table 213 arch/arm64/kernel/armv8_deprecated.c table->data = &insn->current_mode;
table 214 arch/arm64/kernel/armv8_deprecated.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
table 226 arch/arm64/kernel/armv8_deprecated.c table->data = insn;
table 368 arch/arm64/kernel/cpufeature.c #define ARM64_FTR_REG(id, table) { \
table 372 arch/arm64/kernel/cpufeature.c .ftr_bits = &((table)[0]), \
table 343 arch/arm64/kernel/fpsimd.c static int sve_proc_do_default_vl(struct ctl_table *table, int write,
table 1988 arch/arm64/kvm/sys_regs.c struct kvm_sys_reg_target_table *table)
table 1990 arch/arm64/kvm/sys_regs.c target_tables[target] = table;
table 1998 arch/arm64/kvm/sys_regs.c struct kvm_sys_reg_target_table *table;
table 2000 arch/arm64/kvm/sys_regs.c table = target_tables[target];
table 2002 arch/arm64/kvm/sys_regs.c *num = table->table64.num;
table 2003 arch/arm64/kvm/sys_regs.c return table->table64.table;
table 2005 arch/arm64/kvm/sys_regs.c *num = table->table32.num;
table 2006 arch/arm64/kvm/sys_regs.c return table->table32.table;
table 2019 arch/arm64/kvm/sys_regs.c const struct sys_reg_desc table[],
table 2024 arch/arm64/kvm/sys_regs.c return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
table 2069 arch/arm64/kvm/sys_regs.c const struct sys_reg_desc *table,
table 2074 arch/arm64/kvm/sys_regs.c if (!table)
table 2077 arch/arm64/kvm/sys_regs.c r = find_reg(params, table, num);
table 2246 arch/arm64/kvm/sys_regs.c const struct sys_reg_desc *table, *r;
table 2248 arch/arm64/kvm/sys_regs.c table = get_target_table(vcpu->arch.target, true, &num);
table 2251 arch/arm64/kvm/sys_regs.c r = find_reg(params, table, num);
table 2267 arch/arm64/kvm/sys_regs.c const struct sys_reg_desc *table, size_t num,
table 2273 arch/arm64/kvm/sys_regs.c if (table[i].reset) {
table 2274 arch/arm64/kvm/sys_regs.c int reg = table[i].reg;
table 2276 arch/arm64/kvm/sys_regs.c table[i].reset(vcpu, &table[i]);
table 2348 arch/arm64/kvm/sys_regs.c const struct sys_reg_desc table[],
table 2354 arch/arm64/kvm/sys_regs.c return find_reg(params, table, num);
table 2362 arch/arm64/kvm/sys_regs.c const struct sys_reg_desc *table, *r;
table 2372 arch/arm64/kvm/sys_regs.c table = get_target_table(vcpu->arch.target, true, &num);
table 2373 arch/arm64/kvm/sys_regs.c r = find_reg(&params, table, num);
table 2735 arch/arm64/kvm/sys_regs.c static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
table 2740 arch/arm64/kvm/sys_regs.c if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
table 2741 arch/arm64/kvm/sys_regs.c kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
table 2795 arch/arm64/kvm/sys_regs.c const struct sys_reg_desc *table;
table 2801 arch/arm64/kvm/sys_regs.c table = get_target_table(vcpu->arch.target, true, &num);
table 2802 arch/arm64/kvm/sys_regs.c reset_sys_reg_descs(vcpu, table, num, bmap);
table 140 arch/arm64/kvm/sys_regs.h const struct sys_reg_desc table[],
table 55 arch/arm64/kvm/sys_regs_generic_v8.c .table = genericv8_sys_regs,
table 59 arch/arm64/kvm/sys_regs_generic_v8.c .table = genericv8_cp15_regs,
table 1002 arch/arm64/mm/mmu.c pte_t *table;
table 1012 arch/arm64/mm/mmu.c table = pte_offset_kernel(pmdp, addr);
table 1015 arch/arm64/mm/mmu.c pte_free_kernel(NULL, table);
table 1021 arch/arm64/mm/mmu.c pmd_t *table;
table 1033 arch/arm64/mm/mmu.c table = pmd_offset(pudp, addr);
table 1034 arch/arm64/mm/mmu.c pmdp = table;
table 1043 arch/arm64/mm/mmu.c pmd_free(NULL, table);
table 24 arch/ia64/include/asm/agp.h #define free_gatt_pages(table, order) \
table 25 arch/ia64/include/asm/agp.h free_pages((unsigned long)(table), (order))
table 292 arch/ia64/kernel/acpi.c static int __init acpi_parse_madt(struct acpi_table_header *table)
table 294 arch/ia64/kernel/acpi.c acpi_madt = (struct acpi_table_madt *)table;
table 556 arch/ia64/kernel/acpi.c static int __init acpi_parse_fadt(struct acpi_table_header *table)
table 561 arch/ia64/kernel/acpi.c fadt_header = (struct acpi_table_header *)table;
table 63 arch/ia64/kernel/esi.c esi = config_tables[i].table;
table 1501 arch/ia64/kernel/unwind.c lookup (struct unw_table *table, unsigned long rel_ip)
table 1507 arch/ia64/kernel/unwind.c for (lo = 0, hi = table->length; lo < hi; ) {
table 1509 arch/ia64/kernel/unwind.c e = &table->array[mid];
table 1534 arch/ia64/kernel/unwind.c struct unw_table *table, *prev;
table 1564 arch/ia64/kernel/unwind.c for (table = unw.tables; table; table = table->next) {
table 1565 arch/ia64/kernel/unwind.c if (ip >= table->start && ip < table->end) {
table 1575 arch/ia64/kernel/unwind.c prev->next = table->next;
table 1576 arch/ia64/kernel/unwind.c table->next = unw.tables->next;
table 1577 arch/ia64/kernel/unwind.c unw.tables->next = table;
table 1579 arch/ia64/kernel/unwind.c e = lookup(table, ip - table->segment_base);
table 1582 arch/ia64/kernel/unwind.c prev = table;
table 1598 arch/ia64/kernel/unwind.c sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
table 1600 arch/ia64/kernel/unwind.c hdr = *(u64 *) (table->segment_base + e->info_offset);
table 1601 arch/ia64/kernel/unwind.c dp = (u8 *) (table->segment_base + e->info_offset + 8);
table 1642 arch/ia64/kernel/unwind.c __func__, table->segment_base + e->start_offset, sr.when_target);
table 2082 arch/ia64/kernel/unwind.c init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
table 2087 arch/ia64/kernel/unwind.c table->name = name;
table 2088 arch/ia64/kernel/unwind.c table->segment_base = segment_base;
table 2089 arch/ia64/kernel/unwind.c table->gp = gp;
table 2090 arch/ia64/kernel/unwind.c table->start = segment_base + start[0].start_offset;
table 2091 arch/ia64/kernel/unwind.c table->end = segment_base + end[-1].end_offset;
table 2092 arch/ia64/kernel/unwind.c table->array = start;
table 2093 arch/ia64/kernel/unwind.c table->length = end - start;
table 2101 arch/ia64/kernel/unwind.c struct unw_table *table;
table 2110 arch/ia64/kernel/unwind.c table = kmalloc(sizeof(*table), GFP_USER);
table 2111 arch/ia64/kernel/unwind.c if (!table)
table 2114 arch/ia64/kernel/unwind.c init_unwind_table(table, name, segment_base, gp, table_start, table_end);
table 2119 arch/ia64/kernel/unwind.c table->next = unw.tables->next;
table 2120 arch/ia64/kernel/unwind.c unw.tables->next = table;
table 2124 arch/ia64/kernel/unwind.c return table;
table 2130 arch/ia64/kernel/unwind.c struct unw_table *table, *prev;
table 2141 arch/ia64/kernel/unwind.c table = handle;
table 2142 arch/ia64/kernel/unwind.c if (table == &unw.kernel_table) {
table 2153 arch/ia64/kernel/unwind.c if (prev->next == table)
table 2157 arch/ia64/kernel/unwind.c __func__, (void *) table);
table 2161 arch/ia64/kernel/unwind.c prev->next = table->next;
table 2170 arch/ia64/kernel/unwind.c || tmp->ip < table->start || tmp->ip >= table->end)
table 2175 arch/ia64/kernel/unwind.c if (tmp->ip >= table->start && tmp->ip < table->end) {
table 2183 arch/ia64/kernel/unwind.c kfree(table);
table 212 arch/mips/alchemy/board-gpr.c .table = {
table 123 arch/mips/alchemy/board-mtx1.c .table = {
table 419 arch/mips/alchemy/devboards/db1000.c .table = {
table 886 arch/mips/kernel/mips-r2-to-r6-emul.c const struct r2_decoder_table *table)
table 891 arch/mips/kernel/mips-r2-to-r6-emul.c for (p = table; p->func; p++) {
table 29 arch/mips/lasat/sysctl.c int proc_dolasatstring(struct ctl_table *table, int write,
table 34 arch/mips/lasat/sysctl.c r = proc_dostring(table, write, buffer, lenp, ppos);
table 47 arch/mips/lasat/sysctl.c int proc_dolasatrtc(struct ctl_table *table, int write,
table 60 arch/mips/lasat/sysctl.c r = proc_dointvec(table, write, buffer, lenp, ppos);
table 80 arch/mips/lasat/sysctl.c int proc_lasat_ip(struct ctl_table *table, int write,
table 88 arch/mips/lasat/sysctl.c if (!table->data || !table->maxlen || !*lenp ||
table 112 arch/mips/lasat/sysctl.c *(unsigned int *)(table->data) = ip;
table 115 arch/mips/lasat/sysctl.c ip = *(unsigned int *)(table->data);
table 140 arch/mips/lasat/sysctl.c int proc_lasat_prid(struct ctl_table *table, int write,
table 145 arch/mips/lasat/sysctl.c r = proc_dointvec(table, write, buffer, lenp, ppos);
table 12 arch/mips/math-emu/dp_sqrt.c static const unsigned int table[] = {
table 88 arch/mips/math-emu/dp_sqrt.c yh = yh - table[(yh >> 15) & 31];
table 124 arch/mips/rb532/devices.c .table = {
table 18 arch/parisc/include/asm/agp.h #define free_gatt_pages(table, order) \
table 19 arch/parisc/include/asm/agp.h free_pages((unsigned long)(table), (order))
table 54 arch/parisc/include/asm/unwind.h const struct unwind_table_entry *table;
table 72 arch/parisc/include/asm/unwind.h unwind_table_remove(struct unwind_table *table);
table 834 arch/parisc/kernel/module.c unsigned char *table, *end;
table 840 arch/parisc/kernel/module.c table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
table 841 arch/parisc/kernel/module.c end = table + sechdrs[me->arch.unwind_section].sh_size;
table 845 arch/parisc/kernel/module.c me->arch.unwind_section, table, end, gp);
table 846 arch/parisc/kernel/module.c me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
table 47 arch/parisc/kernel/unwind.c find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
table 53 arch/parisc/kernel/unwind.c hi = table->length - 1;
table 57 arch/parisc/kernel/unwind.c e = &table->table[mid];
table 72 arch/parisc/kernel/unwind.c struct unwind_table *table;
table 82 arch/parisc/kernel/unwind.c list_for_each_entry(table, &unwind_tables, list) {
table 83 arch/parisc/kernel/unwind.c if (addr >= table->start &&
table 84 arch/parisc/kernel/unwind.c addr <= table->end)
table 85 arch/parisc/kernel/unwind.c e = find_unwind_entry_in_table(table, addr);
table 88 arch/parisc/kernel/unwind.c list_move(&table->list, &unwind_tables);
table 99 arch/parisc/kernel/unwind.c unwind_table_init(struct unwind_table *table, const char *name,
table 107 arch/parisc/kernel/unwind.c table->name = name;
table 108 arch/parisc/kernel/unwind.c table->base_addr = base_addr;
table 109 arch/parisc/kernel/unwind.c table->gp = gp;
table 110 arch/parisc/kernel/unwind.c table->start = base_addr + start->region_start;
table 111 arch/parisc/kernel/unwind.c table->end = base_addr + end->region_end;
table 112 arch/parisc/kernel/unwind.c table->table = (struct unwind_table_entry *)table_start;
table 113 arch/parisc/kernel/unwind.c table->length = end - start + 1;
table 114 arch/parisc/kernel/unwind.c INIT_LIST_HEAD(&table->list);
table 147 arch/parisc/kernel/unwind.c struct unwind_table *table;
table 154 arch/parisc/kernel/unwind.c table = kmalloc(sizeof(struct unwind_table), GFP_USER);
table 155 arch/parisc/kernel/unwind.c if (table == NULL)
table 157 arch/parisc/kernel/unwind.c unwind_table_init(table, name, base_addr, gp, start, end);
table 159 arch/parisc/kernel/unwind.c list_add_tail(&table->list, &unwind_tables);
table 162 arch/parisc/kernel/unwind.c return table;
table 165 arch/parisc/kernel/unwind.c void unwind_table_remove(struct unwind_table *table)
table 170 arch/parisc/kernel/unwind.c list_del(&table->list);
table 173 arch/parisc/kernel/unwind.c kfree(table);
table 21 arch/powerpc/boot/ep405.c static char *table;
table 30 arch/powerpc/boot/ep405.c planetcore_set_mac_addrs(table);
table 32 arch/powerpc/boot/ep405.c if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
table 40 arch/powerpc/boot/ep405.c if (!planetcore_get_decimal(table, PLANETCORE_KEY_KB_NVRAM, &val)) {
table 56 arch/powerpc/boot/ep405.c table = (char *)r3;
table 57 arch/powerpc/boot/ep405.c planetcore_prepare_table(table);
table 59 arch/powerpc/boot/ep405.c if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
table 67 arch/powerpc/boot/ep405.c planetcore_set_stdout_path(table);
table 15 arch/powerpc/boot/ep8248e.c static char *table;
table 25 arch/powerpc/boot/ep8248e.c planetcore_set_mac_addrs(table);
table 27 arch/powerpc/boot/ep8248e.c if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
table 38 arch/powerpc/boot/ep8248e.c table = (char *)r3;
table 39 arch/powerpc/boot/ep8248e.c planetcore_prepare_table(table);
table 41 arch/powerpc/boot/ep8248e.c if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
table 49 arch/powerpc/boot/ep8248e.c planetcore_set_stdout_path(table);
table 15 arch/powerpc/boot/ep88xc.c static char *table;
table 23 arch/powerpc/boot/ep88xc.c planetcore_set_mac_addrs(table);
table 25 arch/powerpc/boot/ep88xc.c if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
table 36 arch/powerpc/boot/ep88xc.c table = (char *)r3;
table 37 arch/powerpc/boot/ep88xc.c planetcore_prepare_table(table);
table 39 arch/powerpc/boot/ep88xc.c if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
table 47 arch/powerpc/boot/ep88xc.c planetcore_set_stdout_path(table);
table 26 arch/powerpc/boot/planetcore.c void planetcore_prepare_table(char *table)
table 29 arch/powerpc/boot/planetcore.c if (*table == '\n')
table 30 arch/powerpc/boot/planetcore.c *table = 0;
table 32 arch/powerpc/boot/planetcore.c table++;
table 33 arch/powerpc/boot/planetcore.c } while (*(table - 1) || *table != '\n');
table 35 arch/powerpc/boot/planetcore.c *table = 0;
table 38 arch/powerpc/boot/planetcore.c const char *planetcore_get_key(const char *table, const char *key)
table 43 arch/powerpc/boot/planetcore.c if (!strncmp(table, key, keylen) && table[keylen] == '=')
table 44 arch/powerpc/boot/planetcore.c return table + keylen + 1;
table 46 arch/powerpc/boot/planetcore.c table += strlen(table) + 1;
table 47 arch/powerpc/boot/planetcore.c } while (strlen(table) != 0);
table 52 arch/powerpc/boot/planetcore.c int planetcore_get_decimal(const char *table, const char *key, u64 *val)
table 54 arch/powerpc/boot/planetcore.c const char *str = planetcore_get_key(table, key);
table 62 arch/powerpc/boot/planetcore.c int planetcore_get_hex(const char *table, const char *key, u64 *val)
table 64 arch/powerpc/boot/planetcore.c const char *str = planetcore_get_key(table, key);
table 79 arch/powerpc/boot/planetcore.c void planetcore_set_mac_addrs(const char *table)
table 86 arch/powerpc/boot/planetcore.c if (!planetcore_get_hex(table, PLANETCORE_KEY_MAC_ADDR, &int_addr))
table 104 arch/powerpc/boot/planetcore.c void planetcore_set_stdout_path(const char *table)
table 110 arch/powerpc/boot/planetcore.c label = planetcore_get_key(table, PLANETCORE_KEY_SERIAL_PORT);
table 26 arch/powerpc/boot/planetcore.h void planetcore_prepare_table(char *table);
table 33 arch/powerpc/boot/planetcore.h const char *planetcore_get_key(const char *table, const char *key);
table 34 arch/powerpc/boot/planetcore.h int planetcore_get_decimal(const char *table, const char *key, u64 *val);
table 35 arch/powerpc/boot/planetcore.h int planetcore_get_hex(const char *table, const char *key, u64 *val);
table 40 arch/powerpc/boot/planetcore.h void planetcore_set_mac_addrs(const char *table);
table 45 arch/powerpc/boot/planetcore.h void planetcore_set_stdout_path(const char *table);
table 15 arch/powerpc/include/asm/agp.h #define free_gatt_pages(table, order) \
table 16 arch/powerpc/include/asm/agp.h free_pages((unsigned long)(table), (order))
table 40 arch/powerpc/include/asm/book3s/32/pgalloc.h static inline void pgtable_free(void *table, unsigned index_size)
table 43 arch/powerpc/include/asm/book3s/32/pgalloc.h pte_fragment_free((unsigned long *)table, 0);
table 46 arch/powerpc/include/asm/book3s/32/pgalloc.h kmem_cache_free(PGT_CACHE(index_size), table);
table 53 arch/powerpc/include/asm/book3s/32/pgalloc.h void *table, int shift)
table 55 arch/powerpc/include/asm/book3s/32/pgalloc.h unsigned long pgf = (unsigned long)table;
table 63 arch/powerpc/include/asm/book3s/32/pgalloc.h void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
table 66 arch/powerpc/include/asm/book3s/32/pgalloc.h pgtable_free(table, shift);
table 69 arch/powerpc/include/asm/book3s/32/pgalloc.h static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
table 72 arch/powerpc/include/asm/book3s/32/pgalloc.h pgtable_free_tlb(tlb, table, 0);
table 21 arch/powerpc/include/asm/book3s/64/pgalloc.h extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
table 164 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
table 172 arch/powerpc/include/asm/book3s/64/pgalloc.h pgtable_free_tlb(tlb, table, PTE_INDEX);
table 7 arch/powerpc/include/asm/book3s/pgalloc.h extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
table 191 arch/powerpc/include/asm/kvm_book3s.h struct kvmppc_pte *gpte, u64 table,
table 8 arch/powerpc/include/asm/nohash/pgalloc.h extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
table 37 arch/powerpc/include/asm/nohash/pgalloc.h static inline void pgtable_free(void *table, int shift)
table 40 arch/powerpc/include/asm/nohash/pgalloc.h pte_fragment_free((unsigned long *)table, 0);
table 43 arch/powerpc/include/asm/nohash/pgalloc.h kmem_cache_free(PGT_CACHE(shift), table);
table 49 arch/powerpc/include/asm/nohash/pgalloc.h static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
table 51 arch/powerpc/include/asm/nohash/pgalloc.h unsigned long pgf = (unsigned long)table;
table 60 arch/powerpc/include/asm/nohash/pgalloc.h void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
table 63 arch/powerpc/include/asm/nohash/pgalloc.h pgtable_free(table, shift);
table 66 arch/powerpc/include/asm/nohash/pgalloc.h static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
table 70 arch/powerpc/include/asm/nohash/pgalloc.h pgtable_free_tlb(tlb, table, 0);
table 36 arch/powerpc/include/asm/pgalloc.h void pte_fragment_free(unsigned long *table, int kernel);
table 395 arch/powerpc/kernel/mce_power.c const struct mce_ierror_table table[],
table 405 arch/powerpc/kernel/mce_power.c for (i = 0; table[i].srr1_mask; i++) {
table 406 arch/powerpc/kernel/mce_power.c if ((srr1 & table[i].srr1_mask) != table[i].srr1_value)
table 410 arch/powerpc/kernel/mce_power.c switch (table[i].error_type) {
table 425 arch/powerpc/kernel/mce_power.c mce_err->error_type = table[i].error_type;
table 426 arch/powerpc/kernel/mce_power.c mce_err->error_class = table[i].error_class;
table 427 arch/powerpc/kernel/mce_power.c switch (table[i].error_type) {
table 429 arch/powerpc/kernel/mce_power.c mce_err->u.ue_error_type = table[i].error_subtype;
table 432 arch/powerpc/kernel/mce_power.c mce_err->u.slb_error_type = table[i].error_subtype;
table 435 arch/powerpc/kernel/mce_power.c mce_err->u.erat_error_type = table[i].error_subtype;
table 438 arch/powerpc/kernel/mce_power.c mce_err->u.tlb_error_type = table[i].error_subtype;
table 441 arch/powerpc/kernel/mce_power.c mce_err->u.user_error_type = table[i].error_subtype;
table 444 arch/powerpc/kernel/mce_power.c mce_err->u.ra_error_type = table[i].error_subtype;
table 447 arch/powerpc/kernel/mce_power.c mce_err->u.link_error_type = table[i].error_subtype;
table 450 arch/powerpc/kernel/mce_power.c mce_err->sync_error = table[i].sync_error;
table 451 arch/powerpc/kernel/mce_power.c mce_err->severity = table[i].severity;
table 452 arch/powerpc/kernel/mce_power.c mce_err->initiator = table[i].initiator;
table 453 arch/powerpc/kernel/mce_power.c if (table[i].nip_valid) {
table 456 arch/powerpc/kernel/mce_power.c table[i].error_type == MCE_ERROR_TYPE_UE) {
table 481 arch/powerpc/kernel/mce_power.c const struct mce_derror_table table[],
table 492 arch/powerpc/kernel/mce_power.c for (i = 0; table[i].dsisr_value; i++) {
table 493 arch/powerpc/kernel/mce_power.c if (!(dsisr & table[i].dsisr_value))
table 497 arch/powerpc/kernel/mce_power.c switch (table[i].error_type) {
table 523 arch/powerpc/kernel/mce_power.c mce_err->error_type = table[i].error_type;
table 524 arch/powerpc/kernel/mce_power.c mce_err->error_class = table[i].error_class;
table 525 arch/powerpc/kernel/mce_power.c switch (table[i].error_type) {
table 527 arch/powerpc/kernel/mce_power.c mce_err->u.ue_error_type = table[i].error_subtype;
table 530 arch/powerpc/kernel/mce_power.c mce_err->u.slb_error_type = table[i].error_subtype;
table 533 arch/powerpc/kernel/mce_power.c mce_err->u.erat_error_type = table[i].error_subtype;
table 536 arch/powerpc/kernel/mce_power.c mce_err->u.tlb_error_type = table[i].error_subtype;
table 539 arch/powerpc/kernel/mce_power.c mce_err->u.user_error_type = table[i].error_subtype;
table 542 arch/powerpc/kernel/mce_power.c mce_err->u.ra_error_type = table[i].error_subtype;
table 545 arch/powerpc/kernel/mce_power.c mce_err->u.link_error_type = table[i].error_subtype;
table 548 arch/powerpc/kernel/mce_power.c mce_err->sync_error = table[i].sync_error;
table 549 arch/powerpc/kernel/mce_power.c mce_err->severity = table[i].severity;
table 550 arch/powerpc/kernel/mce_power.c mce_err->initiator = table[i].initiator;
table 551 arch/powerpc/kernel/mce_power.c if (table[i].dar_valid)
table 554 arch/powerpc/kernel/mce_power.c table[i].error_type == MCE_ERROR_TYPE_UE) {
table 221 arch/powerpc/kvm/book3s_64_mmu_radix.c struct kvmppc_pte *gpte, u64 table,
table 229 arch/powerpc/kvm/book3s_64_mmu_radix.c if ((table & PRTS_MASK) > 24)
table 231 arch/powerpc/kvm/book3s_64_mmu_radix.c size = 1ul << ((table & PRTS_MASK) + 12);
table 238 arch/powerpc/kvm/book3s_64_mmu_radix.c ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
table 838 arch/powerpc/mm/book3s64/hash_utils.c unsigned long table;
table 891 arch/powerpc/mm/book3s64/hash_utils.c table = memblock_phys_alloc_range(htab_size_bytes,
table 894 arch/powerpc/mm/book3s64/hash_utils.c if (!table)
table 898 arch/powerpc/mm/book3s64/hash_utils.c DBG("Hash table allocated at %lx, size: %lx\n", table,
table 901 arch/powerpc/mm/book3s64/hash_utils.c htab_address = __va(table);
table 904 arch/powerpc/mm/book3s64/hash_utils.c _SDR1 = table + __ilog2(htab_size_bytes) - 18;
table 907 arch/powerpc/mm/book3s64/hash_utils.c memset((void *)table, 0, htab_size_bytes);
table 913 arch/powerpc/mm/book3s64/hash_utils.c hash_init_partition_table(table, htab_size_bytes);
table 351 arch/powerpc/mm/book3s64/pgtable.c static inline void pgtable_free(void *table, int index)
table 355 arch/powerpc/mm/book3s64/pgtable.c pte_fragment_free(table, 0);
table 358 arch/powerpc/mm/book3s64/pgtable.c pmd_fragment_free(table);
table 361 arch/powerpc/mm/book3s64/pgtable.c kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
table 367 arch/powerpc/mm/book3s64/pgtable.c kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
table 372 arch/powerpc/mm/book3s64/pgtable.c kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
table 381 arch/powerpc/mm/book3s64/pgtable.c void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
table 383 arch/powerpc/mm/book3s64/pgtable.c unsigned long pgf = (unsigned long)table;
table 392 arch/powerpc/mm/book3s64/pgtable.c void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
table 395 arch/powerpc/mm/book3s64/pgtable.c return pgtable_free(table, index);
table 109 arch/powerpc/mm/pgtable-frag.c void pte_fragment_free(unsigned long *table, int kernel)
table 111 arch/powerpc/mm/pgtable-frag.c struct page *page = virt_to_page(table);
table 106 arch/powerpc/platforms/cell/iommu.c struct iommu_table table;
table 161 arch/powerpc/platforms/cell/iommu.c container_of(tbl, struct iommu_window, table);
table 206 arch/powerpc/platforms/cell/iommu.c container_of(tbl, struct iommu_window, table);
table 480 arch/powerpc/platforms/cell/iommu.c window->table.it_blocksize = 16;
table 481 arch/powerpc/platforms/cell/iommu.c window->table.it_base = (unsigned long)iommu->ptab;
table 482 arch/powerpc/platforms/cell/iommu.c window->table.it_index = iommu->nid;
table 483 arch/powerpc/platforms/cell/iommu.c window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
table 484 arch/powerpc/platforms/cell/iommu.c window->table.it_offset =
table 485 arch/powerpc/platforms/cell/iommu.c (offset >> window->table.it_page_shift) + pte_offset;
table 486 arch/powerpc/platforms/cell/iommu.c window->table.it_size = size >> window->table.it_page_shift;
table 487 arch/powerpc/platforms/cell/iommu.c window->table.it_ops = &cell_iommu_ops;
table 489 arch/powerpc/platforms/cell/iommu.c iommu_init_table(&window->table, iommu->nid, 0, 0);
table 492 arch/powerpc/platforms/cell/iommu.c pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
table 493 arch/powerpc/platforms/cell/iommu.c pr_debug("\tbase 0x%016lx\n", window->table.it_base);
table 494 arch/powerpc/platforms/cell/iommu.c pr_debug("\toffset 0x%lx\n", window->table.it_offset);
table 495 arch/powerpc/platforms/cell/iommu.c pr_debug("\tsize %ld\n", window->table.it_size);
table 514 arch/powerpc/platforms/cell/iommu.c __set_bit(0, window->table.it_map);
table 515 arch/powerpc/platforms/cell/iommu.c tce_build_cell(&window->table, window->table.it_offset, 1,
table 556 arch/powerpc/platforms/cell/iommu.c return &window->table;
table 52 arch/s390/include/asm/gmap.h unsigned long *table;
table 118 arch/s390/include/asm/gmap.h void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
table 53 arch/s390/include/asm/pgalloc.h unsigned long *table = crst_table_alloc(mm);
table 55 arch/s390/include/asm/pgalloc.h if (table)
table 56 arch/s390/include/asm/pgalloc.h crst_table_init(table, _REGION2_ENTRY_EMPTY);
table 57 arch/s390/include/asm/pgalloc.h return (p4d_t *) table;
table 68 arch/s390/include/asm/pgalloc.h unsigned long *table = crst_table_alloc(mm);
table 69 arch/s390/include/asm/pgalloc.h if (table)
table 70 arch/s390/include/asm/pgalloc.h crst_table_init(table, _REGION3_ENTRY_EMPTY);
table 71 arch/s390/include/asm/pgalloc.h return (pud_t *) table;
table 82 arch/s390/include/asm/pgalloc.h unsigned long *table = crst_table_alloc(mm);
table 84 arch/s390/include/asm/pgalloc.h if (!table)
table 86 arch/s390/include/asm/pgalloc.h crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
table 87 arch/s390/include/asm/pgalloc.h if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
table 88 arch/s390/include/asm/pgalloc.h crst_table_free(mm, table);
table 91 arch/s390/include/asm/pgalloc.h return (pmd_t *) table;
table 119 arch/s390/include/asm/pgalloc.h unsigned long *table = crst_table_alloc(mm);
table 121 arch/s390/include/asm/pgalloc.h if (!table)
table 125 arch/s390/include/asm/pgalloc.h if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
table 126 arch/s390/include/asm/pgalloc.h crst_table_free(mm, table);
table 130 arch/s390/include/asm/pgalloc.h return (pgd_t *) table;
table 581 arch/s390/include/asm/pgtable.h unsigned long table, unsigned long dtt,
table 586 arch/s390/include/asm/pgtable.h register unsigned long reg4 asm("4") = table | dtt;
table 869 arch/s390/kernel/debug.c static int s390dbf_procactive(struct ctl_table *table, int write,
table 873 arch/s390/kernel/debug.c return proc_dointvec(table, write, buffer, lenp, ppos);
table 113 arch/s390/mm/fault.c unsigned long *table = __va(asce & _ASCE_ORIGIN);
table 118 arch/s390/mm/fault.c table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
table 119 arch/s390/mm/fault.c if (bad_address(table))
table 121 arch/s390/mm/fault.c pr_cont("R1:%016lx ", *table);
table 122 arch/s390/mm/fault.c if (*table & _REGION_ENTRY_INVALID)
table 124 arch/s390/mm/fault.c table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table 127 arch/s390/mm/fault.c table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
table 128 arch/s390/mm/fault.c if (bad_address(table))
table 130 arch/s390/mm/fault.c pr_cont("R2:%016lx ", *table);
table 131 arch/s390/mm/fault.c if (*table & _REGION_ENTRY_INVALID)
table 133 arch/s390/mm/fault.c table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table 136 arch/s390/mm/fault.c table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
table 137 arch/s390/mm/fault.c if (bad_address(table))
table 139 arch/s390/mm/fault.c pr_cont("R3:%016lx ", *table);
table 140 arch/s390/mm/fault.c if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
table 142 arch/s390/mm/fault.c table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table 145 arch/s390/mm/fault.c table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
table 146 arch/s390/mm/fault.c if (bad_address(table))
table 148 arch/s390/mm/fault.c pr_cont("S:%016lx ", *table);
table 149 arch/s390/mm/fault.c if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
table 151 arch/s390/mm/fault.c table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
table 153 arch/s390/mm/fault.c table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
table 154 arch/s390/mm/fault.c if (bad_address(table))
table 156 arch/s390/mm/fault.c pr_cont("P:%016lx ", *table);
table 39 arch/s390/mm/gmap.c unsigned long *table;
table 76 arch/s390/mm/gmap.c table = (unsigned long *) page_to_phys(page);
table 77 arch/s390/mm/gmap.c crst_table_init(table, etype);
table 78 arch/s390/mm/gmap.c gmap->table = table;
table 80 arch/s390/mm/gmap.c _ASCE_USER_BITS | __pa(table);
table 305 arch/s390/mm/gmap.c static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
table 318 arch/s390/mm/gmap.c if (*table & _REGION_ENTRY_INVALID) {
table 320 arch/s390/mm/gmap.c *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
table 321 arch/s390/mm/gmap.c (*table & _REGION_ENTRY_TYPE_MASK);
table 511 arch/s390/mm/gmap.c void gmap_unlink(struct mm_struct *mm, unsigned long *table,
table 543 arch/s390/mm/gmap.c unsigned long *table;
table 554 arch/s390/mm/gmap.c table = gmap->table;
table 556 arch/s390/mm/gmap.c table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
table 557 arch/s390/mm/gmap.c if ((*table & _REGION_ENTRY_INVALID) &&
table 558 arch/s390/mm/gmap.c gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
table 561 arch/s390/mm/gmap.c table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table 564 arch/s390/mm/gmap.c table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
table 565 arch/s390/mm/gmap.c if ((*table & _REGION_ENTRY_INVALID) &&
table 566 arch/s390/mm/gmap.c gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
table 569 arch/s390/mm/gmap.c table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table 572 arch/s390/mm/gmap.c table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
table 573 arch/s390/mm/gmap.c if ((*table & _REGION_ENTRY_INVALID) &&
table 574 arch/s390/mm/gmap.c gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
table 577 arch/s390/mm/gmap.c table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table 579 arch/s390/mm/gmap.c table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
table 602 arch/s390/mm/gmap.c if (*table == _SEGMENT_ENTRY_EMPTY) {
table 604 arch/s390/mm/gmap.c vmaddr >> PMD_SHIFT, table);
table 607 arch/s390/mm/gmap.c *table = (pmd_val(*pmd) &
table 611 arch/s390/mm/gmap.c *table = pmd_val(*pmd) &
table 614 arch/s390/mm/gmap.c } else if (*table & _SEGMENT_ENTRY_PROTECT &&
table 616 arch/s390/mm/gmap.c unprot = (u64)*table;
table 619 arch/s390/mm/gmap.c gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
table 791 arch/s390/mm/gmap.c unsigned long *table;
table 802 arch/s390/mm/gmap.c table = gmap->table;
table 805 arch/s390/mm/gmap.c table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
table 808 arch/s390/mm/gmap.c if (*table & _REGION_ENTRY_INVALID)
table 810 arch/s390/mm/gmap.c table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table 813 arch/s390/mm/gmap.c table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
table 816 arch/s390/mm/gmap.c if (*table & _REGION_ENTRY_INVALID)
table 818 arch/s390/mm/gmap.c table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table 821 arch/s390/mm/gmap.c table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
table 824 arch/s390/mm/gmap.c if (*table & _REGION_ENTRY_INVALID)
table 826 arch/s390/mm/gmap.c table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table 829 arch/s390/mm/gmap.c table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
table 832 arch/s390/mm/gmap.c if (*table & _REGION_ENTRY_INVALID)
table 834 arch/s390/mm/gmap.c table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
table 835 arch/s390/mm/gmap.c table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
table 837 arch/s390/mm/gmap.c return table;
table 852 arch/s390/mm/gmap.c unsigned long *table;
table 856 arch/s390/mm/gmap.c table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
table 857 arch/s390/mm/gmap.c if (!table || *table & _SEGMENT_ENTRY_INVALID)
table 859 arch/s390/mm/gmap.c return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
table 1288 arch/s390/mm/gmap.c unsigned long *table;
table 1291 arch/s390/mm/gmap.c table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
table 1292 arch/s390/mm/gmap.c if (!table || *table & _PAGE_INVALID)
table 1295 arch/s390/mm/gmap.c ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
table 1555 arch/s390/mm/gmap.c unsigned long *table;
table 1563 arch/s390/mm/gmap.c table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
table 1566 arch/s390/mm/gmap.c __gmap_unshadow_r1t(sg, 0, table);
table 1569 arch/s390/mm/gmap.c __gmap_unshadow_r2t(sg, 0, table);
table 1572 arch/s390/mm/gmap.c __gmap_unshadow_r3t(sg, 0, table);
table 1575 arch/s390/mm/gmap.c __gmap_unshadow_sgt(sg, 0, table);
table 1738 arch/s390/mm/gmap.c unsigned long *s_r2t, *table;
table 1753 arch/s390/mm/gmap.c table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
table 1754 arch/s390/mm/gmap.c if (!table) {
table 1758 arch/s390/mm/gmap.c if (!(*table & _REGION_ENTRY_INVALID)) {
table 1761 arch/s390/mm/gmap.c } else if (*table & _REGION_ENTRY_ORIGIN) {
table 1767 arch/s390/mm/gmap.c *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
table 1770 arch/s390/mm/gmap.c *table |= (r2t & _REGION_ENTRY_PROTECT);
table 1774 arch/s390/mm/gmap.c *table &= ~_REGION_ENTRY_INVALID;
table 1787 arch/s390/mm/gmap.c table = gmap_table_walk(sg, saddr, 4);
table 1788 arch/s390/mm/gmap.c if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
table 1792 arch/s390/mm/gmap.c *table &= ~_REGION_ENTRY_INVALID;
table 1822 arch/s390/mm/gmap.c unsigned long *s_r3t, *table;
table 1837 arch/s390/mm/gmap.c table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
table 1838 arch/s390/mm/gmap.c if (!table) {
table 1842 arch/s390/mm/gmap.c if (!(*table & _REGION_ENTRY_INVALID)) {
table 1845 arch/s390/mm/gmap.c } else if (*table & _REGION_ENTRY_ORIGIN) {
table 1851 arch/s390/mm/gmap.c *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
table 1854 arch/s390/mm/gmap.c *table |= (r3t & _REGION_ENTRY_PROTECT);
table 1858 arch/s390/mm/gmap.c *table &= ~_REGION_ENTRY_INVALID;
table 1871 arch/s390/mm/gmap.c table = gmap_table_walk(sg, saddr, 3);
table 1872 arch/s390/mm/gmap.c if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
table 1876 arch/s390/mm/gmap.c *table &= ~_REGION_ENTRY_INVALID;
table 1906 arch/s390/mm/gmap.c unsigned long *s_sgt, *table;
table 1921 arch/s390/mm/gmap.c table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
table 1922 arch/s390/mm/gmap.c if (!table) {
table 1926 arch/s390/mm/gmap.c if (!(*table & _REGION_ENTRY_INVALID)) {
table 1929 arch/s390/mm/gmap.c } else if (*table & _REGION_ENTRY_ORIGIN) {
table 1935 arch/s390/mm/gmap.c *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
table 1938 arch/s390/mm/gmap.c *table |= sgt & _REGION_ENTRY_PROTECT;
table 1942 arch/s390/mm/gmap.c *table &= ~_REGION_ENTRY_INVALID;
table 1955 arch/s390/mm/gmap.c table = gmap_table_walk(sg, saddr, 2);
table 1956 arch/s390/mm/gmap.c if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
table 1960 arch/s390/mm/gmap.c *table &= ~_REGION_ENTRY_INVALID;
table 1990 arch/s390/mm/gmap.c unsigned long *table;
table 1996 arch/s390/mm/gmap.c table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
table 1997 arch/s390/mm/gmap.c if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
table 1999 arch/s390/mm/gmap.c page = pfn_to_page(*table >> PAGE_SHIFT);
table 2001 arch/s390/mm/gmap.c *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
table 2030 arch/s390/mm/gmap.c unsigned long *s_pgt, *table;
table 2045 arch/s390/mm/gmap.c table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
table 2046 arch/s390/mm/gmap.c if (!table) {
table 2050 arch/s390/mm/gmap.c if (!(*table & _SEGMENT_ENTRY_INVALID)) {
table 2053 arch/s390/mm/gmap.c } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
table 2058 arch/s390/mm/gmap.c *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
table 2063 arch/s390/mm/gmap.c *table &= ~_SEGMENT_ENTRY_INVALID;
table 2074 arch/s390/mm/gmap.c table = gmap_table_walk(sg, saddr, 1);
table 2075 arch/s390/mm/gmap.c if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
table 2079 arch/s390/mm/gmap.c *table &= ~_SEGMENT_ENTRY_INVALID;
table 2239 arch/s390/mm/gmap.c unsigned long *table;
table 2247 arch/s390/mm/gmap.c table = radix_tree_lookup(&gmap->host_to_guest,
table 2249 arch/s390/mm/gmap.c if (table)
table 2250 arch/s390/mm/gmap.c gaddr = __gmap_segment_gaddr(table) + offset;
table 2252 arch/s390/mm/gmap.c if (!table)
table 56 arch/s390/mm/kasan_init.c unsigned long *table;
table 58 arch/s390/mm/kasan_init.c table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
table 59 arch/s390/mm/kasan_init.c if (table)
table 60 arch/s390/mm/kasan_init.c crst_table_init(table, val);
table 61 arch/s390/mm/kasan_init.c return table;
table 60 arch/s390/mm/pageattr.c unsigned long table, mask;
table 75 arch/s390/mm/pageattr.c table = (unsigned long)old & mask;
table 76 arch/s390/mm/pageattr.c crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
table 64 arch/s390/mm/pgalloc.c void crst_table_free(struct mm_struct *mm, unsigned long *table)
table 66 arch/s390/mm/pgalloc.c free_pages((unsigned long) table, 2);
table 92 arch/s390/mm/pgalloc.c unsigned long *table, *pgd;
table 100 arch/s390/mm/pgalloc.c table = crst_table_alloc(mm);
table 101 arch/s390/mm/pgalloc.c if (!table) {
table 108 arch/s390/mm/pgalloc.c crst_table_init(table, _REGION2_ENTRY_EMPTY);
table 109 arch/s390/mm/pgalloc.c p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
table 110 arch/s390/mm/pgalloc.c mm->pgd = (pgd_t *) table;
table 116 arch/s390/mm/pgalloc.c crst_table_init(table, _REGION1_ENTRY_EMPTY);
table 117 arch/s390/mm/pgalloc.c pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
table 118 arch/s390/mm/pgalloc.c mm->pgd = (pgd_t *) table;
table 171 arch/s390/mm/pgalloc.c u64 *table;
table 175 arch/s390/mm/pgalloc.c table = (u64 *)page_to_phys(page);
table 176 arch/s390/mm/pgalloc.c memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
table 177 arch/s390/mm/pgalloc.c memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
table 194 arch/s390/mm/pgalloc.c unsigned long *table;
table 200 arch/s390/mm/pgalloc.c table = NULL;
table 208 arch/s390/mm/pgalloc.c table = (unsigned long *) page_to_phys(page);
table 211 arch/s390/mm/pgalloc.c table += PTRS_PER_PTE;
table 218 arch/s390/mm/pgalloc.c if (table)
table 219 arch/s390/mm/pgalloc.c return table;
table 231 arch/s390/mm/pgalloc.c table = (unsigned long *) page_to_phys(page);
table 235 arch/s390/mm/pgalloc.c memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
table 236 arch/s390/mm/pgalloc.c memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
table 240 arch/s390/mm/pgalloc.c memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
table 245 arch/s390/mm/pgalloc.c return table;
table 248 arch/s390/mm/pgalloc.c void page_table_free(struct mm_struct *mm, unsigned long *table)
table 253 arch/s390/mm/pgalloc.c page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
table 256 arch/s390/mm/pgalloc.c bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
table 275 arch/s390/mm/pgalloc.c void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
table 283 arch/s390/mm/pgalloc.c page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
table 285 arch/s390/mm/pgalloc.c gmap_unlink(mm, table, vmaddr);
table 286 arch/s390/mm/pgalloc.c table = (unsigned long *) (__pa(table) | 3);
table 287 arch/s390/mm/pgalloc.c tlb_remove_table(tlb, table);
table 290 arch/s390/mm/pgalloc.c bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
table 299 arch/s390/mm/pgalloc.c table = (unsigned long *) (__pa(table) | (1U << bit));
table 300 arch/s390/mm/pgalloc.c tlb_remove_table(tlb, table);
table 306 arch/s390/mm/pgalloc.c void *table = (void *)((unsigned long) _table ^ mask);
table 307 arch/s390/mm/pgalloc.c struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
table 311 arch/s390/mm/pgalloc.c free_pages((unsigned long) table, 2);
table 338 arch/s390/mm/pgalloc.c u64 *table;
table 340 arch/s390/mm/pgalloc.c table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
table 341 arch/s390/mm/pgalloc.c if (table)
table 342 arch/s390/mm/pgalloc.c memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
table 343 arch/s390/mm/pgalloc.c return (unsigned long) table;
table 346 arch/s390/mm/pgalloc.c static void base_pgt_free(unsigned long table)
table 348 arch/s390/mm/pgalloc.c kmem_cache_free(base_pgt_cache, (void *) table);
table 353 arch/s390/mm/pgalloc.c unsigned long table;
table 355 arch/s390/mm/pgalloc.c table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
table 356 arch/s390/mm/pgalloc.c if (table)
table 357 arch/s390/mm/pgalloc.c crst_table_init((unsigned long *)table, val);
table 358 arch/s390/mm/pgalloc.c return table;
table 361 arch/s390/mm/pgalloc.c static void base_crst_free(unsigned long table)
table 363 arch/s390/mm/pgalloc.c free_pages(table, CRST_ALLOC_ORDER);
table 410 arch/s390/mm/pgalloc.c unsigned long *ste, next, table;
table 420 arch/s390/mm/pgalloc.c table = base_pgt_alloc();
table 421 arch/s390/mm/pgalloc.c if (!table)
table 423 arch/s390/mm/pgalloc.c *ste = table | _SEGMENT_ENTRY;
table 425 arch/s390/mm/pgalloc.c table = *ste & _SEGMENT_ENTRY_ORIGIN;
table 426 arch/s390/mm/pgalloc.c rc = base_page_walk(table, addr, next, alloc);
table 430 arch/s390/mm/pgalloc.c base_pgt_free(table);
arch/s390/mm/pgalloc.c base_pgt_free(table); table 439 arch/s390/mm/pgalloc.c unsigned long *rtte, next, table; table 449 arch/s390/mm/pgalloc.c table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY); table 450 arch/s390/mm/pgalloc.c if (!table) table 452 arch/s390/mm/pgalloc.c *rtte = table | _REGION3_ENTRY; table 454 arch/s390/mm/pgalloc.c table = *rtte & _REGION_ENTRY_ORIGIN; table 455 arch/s390/mm/pgalloc.c rc = base_segment_walk(table, addr, next, alloc); table 459 arch/s390/mm/pgalloc.c base_crst_free(table); table 467 arch/s390/mm/pgalloc.c unsigned long *rste, next, table; table 477 arch/s390/mm/pgalloc.c table = base_crst_alloc(_REGION3_ENTRY_EMPTY); table 478 arch/s390/mm/pgalloc.c if (!table) table 480 arch/s390/mm/pgalloc.c *rste = table | _REGION2_ENTRY; table 482 arch/s390/mm/pgalloc.c table = *rste & _REGION_ENTRY_ORIGIN; table 483 arch/s390/mm/pgalloc.c rc = base_region3_walk(table, addr, next, alloc); table 487 arch/s390/mm/pgalloc.c base_crst_free(table); table 495 arch/s390/mm/pgalloc.c unsigned long *rfte, next, table; table 505 arch/s390/mm/pgalloc.c table = base_crst_alloc(_REGION2_ENTRY_EMPTY); table 506 arch/s390/mm/pgalloc.c if (!table) table 508 arch/s390/mm/pgalloc.c *rfte = table | _REGION1_ENTRY; table 510 arch/s390/mm/pgalloc.c table = *rfte & _REGION_ENTRY_ORIGIN; table 511 arch/s390/mm/pgalloc.c rc = base_region2_walk(table, addr, next, alloc); table 515 arch/s390/mm/pgalloc.c base_crst_free(table); table 529 arch/s390/mm/pgalloc.c unsigned long table = asce & _ASCE_ORIGIN; table 535 arch/s390/mm/pgalloc.c base_segment_walk(table, 0, _REGION3_SIZE, 0); table 538 arch/s390/mm/pgalloc.c base_region3_walk(table, 0, _REGION2_SIZE, 0); table 541 arch/s390/mm/pgalloc.c base_region2_walk(table, 0, _REGION1_SIZE, 0); table 544 arch/s390/mm/pgalloc.c base_region1_walk(table, 0, -_PAGE_SIZE, 0); table 547 arch/s390/mm/pgalloc.c base_crst_free(table); table 581 arch/s390/mm/pgalloc.c unsigned long asce, table, end; table 588 arch/s390/mm/pgalloc.c table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY); table 589 arch/s390/mm/pgalloc.c if (!table) table 591 arch/s390/mm/pgalloc.c rc = base_segment_walk(table, addr, end, 1); table 592 arch/s390/mm/pgalloc.c asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH; table 594 arch/s390/mm/pgalloc.c table = base_crst_alloc(_REGION3_ENTRY_EMPTY); table 595 arch/s390/mm/pgalloc.c if (!table) table 597 arch/s390/mm/pgalloc.c rc = base_region3_walk(table, addr, end, 1); table 598 arch/s390/mm/pgalloc.c asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; table 600 arch/s390/mm/pgalloc.c table = base_crst_alloc(_REGION2_ENTRY_EMPTY); table 601 arch/s390/mm/pgalloc.c if (!table) table 603 arch/s390/mm/pgalloc.c rc = base_region2_walk(table, addr, end, 1); table 604 arch/s390/mm/pgalloc.c asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; table 606 arch/s390/mm/pgalloc.c table = base_crst_alloc(_REGION1_ENTRY_EMPTY); table 607 arch/s390/mm/pgalloc.c if (!table) table 609 arch/s390/mm/pgalloc.c rc = base_region1_walk(table, addr, end, 1); table 610 arch/s390/mm/pgalloc.c asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH; table 43 arch/s390/mm/vmem.c unsigned long *table; table 45 arch/s390/mm/vmem.c table = vmem_alloc_pages(CRST_ALLOC_ORDER); table 46 arch/s390/mm/vmem.c if (table) table 47 arch/s390/mm/vmem.c crst_table_init(table, val); table 48 arch/s390/mm/vmem.c return table; table 30 arch/s390/pci/pci_dma.c unsigned long *table, *entry; table 32 arch/s390/pci/pci_dma.c table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC); 
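
The s390 entries just above (dma_alloc_cpu_table() in arch/s390/pci/pci_dma.c, crst_table_alloc()/crst_table_init() in arch/s390/mm/pgalloc.c and vmem.c) share one pattern: allocate a table, then mark every slot invalid before handing it out, so a lookup that hits an unpopulated slot sees a well-defined invalid value rather than garbage. Below is a minimal user-space sketch of that pattern only; the names demo_alloc_table, DEMO_TABLE_ENTRIES and DEMO_ENTRY_INVALID are illustrative stand-ins (not kernel APIs), and malloc() stands in for kmem_cache_alloc()/__get_free_pages().

    #include <stdlib.h>

    #define DEMO_TABLE_ENTRIES 512UL    /* illustrative count; ZPCI_TABLE_ENTRIES plays this role above */
    #define DEMO_ENTRY_INVALID 0x20UL   /* illustrative "invalid" marker value */

    /* Allocate a table-like array and pre-fill every slot with an invalid
     * marker, mirroring the allocate-then-initialise loop in the
     * dma_alloc_cpu_table() / crst_table_init() entries quoted above. */
    static unsigned long *demo_alloc_table(void)
    {
            unsigned long *table, *entry;

            table = malloc(DEMO_TABLE_ENTRIES * sizeof(*table));
            if (!table)
                    return NULL;
            for (entry = table; entry < table + DEMO_TABLE_ENTRIES; entry++)
                    *entry = DEMO_ENTRY_INVALID;
            return table;
    }

    static void demo_free_table(unsigned long *table)
    {
            free(table);
    }
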
table 33 arch/s390/pci/pci_dma.c if (!table) table 36 arch/s390/pci/pci_dma.c for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++) table 38 arch/s390/pci/pci_dma.c return table; table 41 arch/s390/pci/pci_dma.c static void dma_free_cpu_table(void *table) table 43 arch/s390/pci/pci_dma.c kmem_cache_free(dma_region_table_cache, table); table 48 arch/s390/pci/pci_dma.c unsigned long *table, *entry; table 50 arch/s390/pci/pci_dma.c table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC); table 51 arch/s390/pci/pci_dma.c if (!table) table 54 arch/s390/pci/pci_dma.c for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++) table 56 arch/s390/pci/pci_dma.c return table; table 59 arch/s390/pci/pci_dma.c static void dma_free_page_table(void *table) table 61 arch/s390/pci/pci_dma.c kmem_cache_free(dma_page_table_cache, table); table 246 arch/s390/pci/pci_dma.c void dma_cleanup_tables(unsigned long *table) table 250 arch/s390/pci/pci_dma.c if (!table) table 254 arch/s390/pci/pci_dma.c if (reg_entry_isvalid(table[rtx])) table 255 arch/s390/pci/pci_dma.c dma_free_seg_table(table[rtx]); table 257 arch/s390/pci/pci_dma.c dma_free_cpu_table(table); table 246 arch/sh/boards/board-sh7785lcr.c .table = { table 264 arch/sh/boards/mach-ap325rxa/setup.c .table = { table 472 arch/sh/boards/mach-ecovec24/setup.c .table = { table 479 arch/sh/boards/mach-ecovec24/setup.c .table = { table 487 arch/sh/boards/mach-ecovec24/setup.c .table = { table 646 arch/sh/boards/mach-ecovec24/setup.c .table = { table 686 arch/sh/boards/mach-ecovec24/setup.c .table = { table 696 arch/sh/boards/mach-ecovec24/setup.c .table = { table 744 arch/sh/boards/mach-ecovec24/setup.c .table = { table 792 arch/sh/boards/mach-ecovec24/setup.c .table = { table 842 arch/sh/boards/mach-ecovec24/setup.c .table = { table 293 arch/sh/boards/mach-kfr2r09/setup.c .table = { table 351 arch/sh/boards/mach-migor/setup.c .table = { table 360 arch/sh/boards/mach-migor/setup.c .table = { table 14 arch/sparc/include/asm/agp.h #define free_gatt_pages(table, order) \ table 15 arch/sparc/include/asm/agp.h free_pages((unsigned long)(table), (order)) table 32 arch/sparc/include/asm/iommu_64.h void *table; /* IOTSB table base virtual addr*/ table 72 arch/sparc/include/asm/pgalloc_64.h void pgtable_free(void *table, bool is_page); table 79 arch/sparc/include/asm/pgalloc_64.h static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page) table 81 arch/sparc/include/asm/pgalloc_64.h unsigned long pgf = (unsigned long)table; table 89 arch/sparc/include/asm/pgalloc_64.h void *table = (void *)((unsigned long)_table & ~0x1UL); table 94 arch/sparc/include/asm/pgalloc_64.h pgtable_free(table, is_page); table 97 arch/sparc/include/asm/pgalloc_64.h static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page) table 99 arch/sparc/include/asm/pgalloc_64.h pgtable_free(table, is_page); table 257 arch/sparc/kernel/irq_32.c #define INSTANTIATE(table) \ table 258 arch/sparc/kernel/irq_32.c table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \ table 259 arch/sparc/kernel/irq_32.c table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \ table 261 arch/sparc/kernel/irq_32.c (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\ table 262 arch/sparc/kernel/irq_32.c table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \ table 263 arch/sparc/kernel/irq_32.c table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP; table 1046 arch/sparc/kernel/ldc.c struct ldc_mtable_entry *table; table 1068 arch/sparc/kernel/ldc.c 
table = (struct ldc_mtable_entry *) table 1071 arch/sparc/kernel/ldc.c if (!table) { table 1077 arch/sparc/kernel/ldc.c memset(table, 0, PAGE_SIZE << order); table 1079 arch/sparc/kernel/ldc.c ldc_iommu->page_table = table; table 1081 arch/sparc/kernel/ldc.c hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table), table 1090 arch/sparc/kernel/ldc.c free_pages((unsigned long) table, order); table 745 arch/sparc/kernel/pci_sun4v.c void *table; table 761 arch/sparc/kernel/pci_sun4v.c table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); table 762 arch/sparc/kernel/pci_sun4v.c if (!table) { table 766 arch/sparc/kernel/pci_sun4v.c iotsb->table = table; table 767 arch/sparc/kernel/pci_sun4v.c iotsb->ra = __pa(table); table 795 arch/sparc/kernel/pci_sun4v.c free_pages((unsigned long)table, order); table 2931 arch/sparc/mm/init_64.c void pgtable_free(void *table, bool is_page) table 2934 arch/sparc/mm/init_64.c __pte_free(table); table 2936 arch/sparc/mm/init_64.c kmem_cache_free(pgtable_cache, table); table 38 arch/x86/boot/compressed/acpi.c acpi_physical_address table; table 45 arch/x86/boot/compressed/acpi.c table = tbl->table; table 47 arch/x86/boot/compressed/acpi.c if (!IS_ENABLED(CONFIG_X86_64) && table >> 32) { table 55 arch/x86/boot/compressed/acpi.c table = tbl->table; table 59 arch/x86/boot/compressed/acpi.c rsdp_addr = table; table 61 arch/x86/boot/compressed/acpi.c return table; table 377 arch/x86/boot/compressed/acpi.c unsigned long table_addr, table_end, table; table 393 arch/x86/boot/compressed/acpi.c table = table_addr + sizeof(struct acpi_table_srat); table 395 arch/x86/boot/compressed/acpi.c while (table + sizeof(struct acpi_subtable_header) < table_end) { table 397 arch/x86/boot/compressed/acpi.c sub_table = (struct acpi_subtable_header *)table; table 418 arch/x86/boot/compressed/acpi.c table += sub_table->length; table 33 arch/x86/boot/compressed/eboot.c efi_system_table_##bits##_t *table; \ table 35 arch/x86/boot/compressed/eboot.c table = (typeof(table))sys_table; \ table 37 arch/x86/boot/compressed/eboot.c c->runtime_services = table->runtime; \ table 38 arch/x86/boot/compressed/eboot.c c->boot_services = table->boottime; \ table 39 arch/x86/boot/compressed/eboot.c c->text_output = table->con_out; \ table 44 arch/x86/boot/compressed/eboot.c void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) table 397 arch/x86/boot/compressed/eboot.c sys_table = (efi_system_table_t *)(unsigned long)efi_early->table; table 753 arch/x86/boot/compressed/eboot.c _table = (efi_system_table_t *)(unsigned long)efi_early->table; table 578 arch/x86/events/intel/pt.c struct topa_entry table[TENTS_PER_PAGE]; table 600 arch/x86/events/intel/pt.c ? &topa_to_page(t)->table[(t)->last] \ table 601 arch/x86/events/intel/pt.c : &topa_to_page(t)->table[(i)]) table 752 arch/x86/events/intel/pt.c pr_debug("# table @%p, off %llx size %zx\n", tp->table, table 756 arch/x86/events/intel/pt.c &tp->table[i], table 757 arch/x86/events/intel/pt.c (unsigned long)tp->table[i].base << TOPA_SHIFT, table 758 arch/x86/events/intel/pt.c sizes(tp->table[i].size), table 759 arch/x86/events/intel/pt.c tp->table[i].end ? 'E' : ' ', table 760 arch/x86/events/intel/pt.c tp->table[i].intr ? 'I' : ' ', table 761 arch/x86/events/intel/pt.c tp->table[i].stop ? 
'S' : ' ', table 762 arch/x86/events/intel/pt.c *(u64 *)&tp->table[i]); table 764 arch/x86/events/intel/pt.c tp->table[i].stop) || table 765 arch/x86/events/intel/pt.c tp->table[i].end) table 970 arch/x86/events/intel/pt.c return &tp->table[idx]; table 978 arch/x86/events/intel/pt.c return &tp->table[idx]; table 994 arch/x86/events/intel/pt.c unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1); table 998 arch/x86/events/intel/pt.c tp = (struct topa_page *)table; table 999 arch/x86/events/intel/pt.c if (tp->table != te) table 1010 arch/x86/events/intel/pt.c return &tp->table[topa->last - 1]; table 1382 arch/x86/events/intel/pt.c pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx, table 1447 arch/x86/events/intel/pt.c pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx, table 29 arch/x86/include/asm/agp.h #define free_gatt_pages(table, order) \ table 30 arch/x86/include/asm/agp.h free_pages((unsigned long)(table), (order)) table 69 arch/x86/include/asm/cpu_device_id.h extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table); table 22 arch/x86/include/asm/e820/api.h extern int e820__update_table(struct e820_table *table); table 203 arch/x86/include/asm/efi.h u64 table; table 224 arch/x86/include/asm/efi.h #define efi_table_attr(table, attr, instance) \ table 226 arch/x86/include/asm/efi.h ((table##_64_t *)(unsigned long)instance)->attr : \ table 227 arch/x86/include/asm/efi.h ((table##_32_t *)(unsigned long)instance)->attr) table 26 arch/x86/include/asm/intel-mid.h extern int __init sfi_parse_mrtc(struct sfi_table_header *table); table 27 arch/x86/include/asm/intel-mid.h extern int __init sfi_parse_mtmr(struct sfi_table_header *table); table 71 arch/x86/include/asm/paravirt.h static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) table 73 arch/x86/include/asm/paravirt.h PVOP_VCALL2(mmu.tlb_remove_table, tlb, table); table 212 arch/x86/include/asm/paravirt_types.h void (*tlb_remove_table)(struct mmu_gather *tlb, void *table); table 36 arch/x86/include/asm/tlb.h static inline void __tlb_remove_table(void *table) table 38 arch/x86/include/asm/tlb.h free_page_and_swap_cache(table); table 252 arch/x86/include/asm/xen/hypercall.h HYPERVISOR_set_trap_table(struct trap_info *table) table 254 arch/x86/include/asm/xen/hypercall.h return _hypercall1(int, set_trap_table, table); table 95 arch/x86/include/uapi/asm/bootparam.h __u8 table[14]; table 123 arch/x86/kernel/acpi/boot.c static int __init acpi_parse_madt(struct acpi_table_header *table) table 130 arch/x86/kernel/acpi/boot.c madt = (struct acpi_table_madt *)table; table 853 arch/x86/kernel/acpi/boot.c static int __init acpi_parse_sbf(struct acpi_table_header *table) table 855 arch/x86/kernel/acpi/boot.c struct acpi_table_boot *sb = (struct acpi_table_boot *)table; table 867 arch/x86/kernel/acpi/boot.c static int __init acpi_parse_hpet(struct acpi_table_header *table) table 869 arch/x86/kernel/acpi/boot.c struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table; table 954 arch/x86/kernel/acpi/boot.c static int __init acpi_parse_fadt(struct acpi_table_header *table) table 1117 arch/x86/kernel/cpu/common.c static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) table 1119 arch/x86/kernel/cpu/common.c const struct x86_cpu_id *m = x86_match_cpu(table); table 77 arch/x86/kernel/cpu/match.c bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table) table 79 arch/x86/kernel/cpu/match.c const struct x86_cpu_desc *res = 
x86_match_cpu_with_stepping(table); table 298 arch/x86/kernel/cpu/microcode/amd.c struct equiv_cpu_table table; table 309 arch/x86/kernel/cpu/microcode/amd.c table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ); table 310 arch/x86/kernel/cpu/microcode/amd.c table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry); table 317 arch/x86/kernel/cpu/microcode/amd.c eq_id = find_equiv_id(&table, desc->cpuid_1_eax); table 77 arch/x86/kernel/e820.c static bool _e820__mapped_any(struct e820_table *table, table 82 arch/x86/kernel/e820.c for (i = 0; i < table->nr_entries; i++) { table 83 arch/x86/kernel/e820.c struct e820_entry *entry = &table->entries[i]; table 166 arch/x86/kernel/e820.c static void __init __e820__range_add(struct e820_table *table, u64 start, u64 size, enum e820_type type) table 168 arch/x86/kernel/e820.c int x = table->nr_entries; table 170 arch/x86/kernel/e820.c if (x >= ARRAY_SIZE(table->entries)) { table 176 arch/x86/kernel/e820.c table->entries[x].addr = start; table 177 arch/x86/kernel/e820.c table->entries[x].size = size; table 178 arch/x86/kernel/e820.c table->entries[x].type = type; table 179 arch/x86/kernel/e820.c table->nr_entries++; table 307 arch/x86/kernel/e820.c int __init e820__update_table(struct e820_table *table) table 309 arch/x86/kernel/e820.c struct e820_entry *entries = table->entries; table 310 arch/x86/kernel/e820.c u32 max_nr_entries = ARRAY_SIZE(table->entries); table 317 arch/x86/kernel/e820.c if (table->nr_entries < 2) table 320 arch/x86/kernel/e820.c BUG_ON(table->nr_entries > max_nr_entries); table 323 arch/x86/kernel/e820.c for (i = 0; i < table->nr_entries; i++) { table 329 arch/x86/kernel/e820.c for (i = 0; i < 2 * table->nr_entries; i++) table 337 arch/x86/kernel/e820.c for (i = 0; i < table->nr_entries; i++) { table 402 arch/x86/kernel/e820.c table->nr_entries = new_nr_entries; table 448 arch/x86/kernel/e820.c __e820__range_update(struct e820_table *table, u64 start, u64 size, enum e820_type old_type, enum e820_type new_type) table 466 arch/x86/kernel/e820.c for (i = 0; i < table->nr_entries; i++) { table 467 arch/x86/kernel/e820.c struct e820_entry *entry = &table->entries[i]; table 485 arch/x86/kernel/e820.c __e820__range_add(table, start, size, new_type); table 486 arch/x86/kernel/e820.c __e820__range_add(table, end, entry_end - end, entry->type); table 498 arch/x86/kernel/e820.c __e820__range_add(table, final_start, final_end - final_start, new_type); table 41 arch/x86/kernel/itmt.c static int sched_itmt_update_handler(struct ctl_table *table, int write, table 56 arch/x86/kernel/itmt.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 402 arch/x86/kvm/irq_comm.c struct kvm_irq_routing_table *table; table 407 arch/x86/kvm/irq_comm.c table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); table 408 arch/x86/kvm/irq_comm.c nr_ioapic_pins = min_t(u32, table->nr_rt_entries, table 411 arch/x86/kvm/irq_comm.c hlist_for_each_entry(entry, &table->map[i], link) { table 137 arch/x86/kvm/paging_tmpl.h pt_element_t *table; table 142 arch/x86/kvm/paging_tmpl.h table = kmap_atomic(page); table 143 arch/x86/kvm/paging_tmpl.h ret = CMPXCHG(&table[index], orig_pte, new_pte); table 144 arch/x86/kvm/paging_tmpl.h kunmap_atomic(table); table 161 arch/x86/kvm/paging_tmpl.h table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB); table 162 arch/x86/kvm/paging_tmpl.h if (!table) { table 166 arch/x86/kvm/paging_tmpl.h ret = CMPXCHG(&table[index], orig_pte, new_pte); table 167 arch/x86/kvm/paging_tmpl.h memunmap(table); table 1000 
arch/x86/kvm/vmx/vmx.c struct desc_struct *table; table 1006 arch/x86/kvm/vmx/vmx.c table = get_current_gdt_ro(); table 1014 arch/x86/kvm/vmx/vmx.c table = (struct desc_struct *)segment_base(ldt_selector); table 1016 arch/x86/kvm/vmx/vmx.c v = get_desc_base(&table[selector >> 3]); table 29 arch/x86/lib/inat.c const insn_attr_t *table; table 34 arch/x86/lib/inat.c table = inat_escape_tables[n][0]; table 35 arch/x86/lib/inat.c if (!table) table 37 arch/x86/lib/inat.c if (inat_has_variant(table[opcode]) && lpfx_id) { table 38 arch/x86/lib/inat.c table = inat_escape_tables[n][lpfx_id]; table 39 arch/x86/lib/inat.c if (!table) table 42 arch/x86/lib/inat.c return table[opcode]; table 48 arch/x86/lib/inat.c const insn_attr_t *table; table 53 arch/x86/lib/inat.c table = inat_group_tables[n][0]; table 54 arch/x86/lib/inat.c if (!table) table 56 arch/x86/lib/inat.c if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) { table 57 arch/x86/lib/inat.c table = inat_group_tables[n][lpfx_id]; table 58 arch/x86/lib/inat.c if (!table) table 61 arch/x86/lib/inat.c return table[X86_MODRM_REG(modrm)] | table 68 arch/x86/lib/inat.c const insn_attr_t *table; table 72 arch/x86/lib/inat.c table = inat_avx_tables[vex_m][0]; table 73 arch/x86/lib/inat.c if (!table) table 75 arch/x86/lib/inat.c if (!inat_is_group(table[opcode]) && vex_p) { table 77 arch/x86/lib/inat.c table = inat_avx_tables[vex_m][vex_p]; table 78 arch/x86/lib/inat.c if (!table) table 81 arch/x86/lib/inat.c return table[opcode]; table 349 arch/x86/pci/pcbios.c struct irq_info *table; table 365 arch/x86/pci/pcbios.c opt.table = (struct irq_info *) page; table 642 arch/x86/platform/efi/efi_64.c u32 table = (u32)(unsigned long)efi.systab; \ table 645 arch/x86/platform/efi/efi_64.c rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \ table 547 arch/x86/platform/efi/quirks.c ((efi_config_table_64_t *)p)->table = data->smbios; table 57 arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c .table = { table 65 arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c struct gpiod_lookup_table *table = &bcm43xx_vmmc_gpio_table; table 66 arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c struct gpiod_lookup *lookup = table->table; table 70 arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c gpiod_add_lookup_table(table); table 25 arch/x86/platform/intel-mid/device_libs/platform_bt.c .table = { table 39 arch/x86/platform/intel-mid/device_libs/platform_bt.c struct gpiod_lookup_table *table = &tng_bt_sfi_gpio_table; table 40 arch/x86/platform/intel-mid/device_libs/platform_bt.c struct gpiod_lookup *lookup = table->table; table 49 arch/x86/platform/intel-mid/device_libs/platform_bt.c ddata->name = table->dev_id; table 55 arch/x86/platform/intel-mid/device_libs/platform_bt.c gpiod_add_lookup_table(table); table 71 arch/x86/platform/intel-mid/sfi.c int __init sfi_parse_mtmr(struct sfi_table_header *table) table 78 arch/x86/platform/intel-mid/sfi.c sb = (struct sfi_table_simple *)table; table 142 arch/x86/platform/intel-mid/sfi.c int __init sfi_parse_mrtc(struct sfi_table_header *table) table 150 arch/x86/platform/intel-mid/sfi.c sb = (struct sfi_table_simple *)table; table 182 arch/x86/platform/intel-mid/sfi.c static int __init sfi_parse_gpio(struct sfi_table_header *table) table 190 arch/x86/platform/intel-mid/sfi.c sb = (struct sfi_table_simple *)table; table 464 arch/x86/platform/intel-mid/sfi.c static int __init sfi_parse_devs(struct sfi_table_header *table) table 473 arch/x86/platform/intel-mid/sfi.c sb = (struct 
sfi_table_simple *)table; table 39 arch/x86/platform/sfi/sfi.c static int __init sfi_parse_cpus(struct sfi_table_header *table) table 46 arch/x86/platform/sfi/sfi.c sb = (struct sfi_table_simple *)table; table 62 arch/x86/platform/sfi/sfi.c static int __init sfi_parse_ioapic(struct sfi_table_header *table) table 72 arch/x86/platform/sfi/sfi.c sb = (struct sfi_table_simple *)table; table 75 arch/x86/power/hibernate.c static int get_e820_md5(struct e820_table *table, void *buf) table 96 arch/x86/power/hibernate.c sizeof(struct e820_entry) * table->nr_entries; table 98 arch/x86/power/hibernate.c if (crypto_shash_digest(desc, (u8 *)table, size, buf)) table 270 block/bio.c void bio_init(struct bio *bio, struct bio_vec *table, table 277 block/bio.c bio->bi_io_vec = table; table 1110 block/sed-opal.c static int generic_get_column(struct opal_dev *dev, const u8 *table, table 1115 block/sed-opal.c err = cmd_start(dev, table, opalmethod[OPAL_GET]); table 1142 block/sed-opal.c static int generic_get_table_info(struct opal_dev *dev, enum opal_uid table, table 1155 block/sed-opal.c memcpy(uid+half, opaluid[table], half); table 39 crypto/lrw.c struct gf128mul_64k *table; table 87 crypto/lrw.c if (ctx->table) table 88 crypto/lrw.c gf128mul_free_64k(ctx->table); table 91 crypto/lrw.c ctx->table = gf128mul_init_64k_bbe((be128 *)tweak); table 92 crypto/lrw.c if (!ctx->table) table 99 crypto/lrw.c gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); table 239 crypto/lrw.c gf128mul_64k_bbe(&rctx->t, ctx->table); table 287 crypto/lrw.c if (ctx->table) table 288 crypto/lrw.c gf128mul_free_64k(ctx->table); table 30 drivers/acpi/acpi_configfs.c struct acpi_table *table; table 33 drivers/acpi/acpi_configfs.c table = container_of(cfg, struct acpi_table, cfg); table 35 drivers/acpi/acpi_configfs.c if (table->header) { table 50 drivers/acpi/acpi_configfs.c table = container_of(cfg, struct acpi_table, cfg); table 52 drivers/acpi/acpi_configfs.c table->header = kmemdup(header, header->length, GFP_KERNEL); table 53 drivers/acpi/acpi_configfs.c if (!table->header) table 56 drivers/acpi/acpi_configfs.c ret = acpi_load_table(table->header); table 58 drivers/acpi/acpi_configfs.c kfree(table->header); table 59 drivers/acpi/acpi_configfs.c table->header = NULL; table 67 drivers/acpi/acpi_configfs.c struct acpi_table *table = container_of(cfg, struct acpi_table, cfg); table 69 drivers/acpi/acpi_configfs.c if (!table->header) table 72 drivers/acpi/acpi_configfs.c return table->header; table 210 drivers/acpi/acpi_configfs.c struct acpi_table *table; table 212 drivers/acpi/acpi_configfs.c table = kzalloc(sizeof(*table), GFP_KERNEL); table 213 drivers/acpi/acpi_configfs.c if (!table) table 216 drivers/acpi/acpi_configfs.c config_item_init_type_name(&table->cfg, name, &acpi_table_type); table 217 drivers/acpi/acpi_configfs.c return &table->cfg; table 223 drivers/acpi/acpi_configfs.c struct acpi_table *table = container_of(cfg, struct acpi_table, cfg); table 226 drivers/acpi/acpi_configfs.c acpi_tb_unload_table(table->index); table 151 drivers/acpi/acpica/acapps.h ad_write_table(struct acpi_table_header *table, table 69 drivers/acpi/acpica/acconvert.h cv_init_file_tree(struct acpi_table_header *table, table 192 drivers/acpi/acpica/aclocal.h struct acpi_table_header *table; table 34 drivers/acpi/acpica/actables.h u8 flags, struct acpi_table_header *table); table 57 drivers/acpi/acpica/actables.h void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length); table 83 drivers/acpi/acpica/actables.h acpi_tb_release_table(struct 
acpi_table_header *table, table 102 drivers/acpi/acpica/actables.h void acpi_tb_notify_table(u32 event, void *table); table 126 drivers/acpi/acpica/actables.h acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length); table 543 drivers/acpi/acpica/acutils.h u8 acpi_ut_is_aml_table(struct acpi_table_header *table); table 88 drivers/acpi/acpica/dbfileio.c struct acpi_table_header *table; table 94 drivers/acpi/acpica/dbfileio.c table = table_list_head->table; table 96 drivers/acpi/acpica/dbfileio.c status = acpi_load_table(table); table 101 drivers/acpi/acpica/dbfileio.c table->signature); table 112 drivers/acpi/acpica/dbfileio.c table->signature); table 162 drivers/acpi/acpica/dsinit.c struct acpi_table_header *table; table 196 drivers/acpi/acpica/dsinit.c status = acpi_get_table_by_index(table_index, &table); table 203 drivers/acpi/acpica/dsinit.c if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT)) { table 213 drivers/acpi/acpica/dsinit.c table->signature, table->oem_table_id, owner_id, table 466 drivers/acpi/acpica/dsopcode.c struct acpi_table_header *table; table 519 drivers/acpi/acpica/dsopcode.c status = acpi_get_table_by_index(table_index, &table); table 530 drivers/acpi/acpica/dsopcode.c obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table); table 531 drivers/acpi/acpica/dsopcode.c obj_desc->region.length = table->length; table 274 drivers/acpi/acpica/exconfig.c struct acpi_table_header *table; table 348 drivers/acpi/acpica/exconfig.c table = ACPI_ALLOCATE(length); table 349 drivers/acpi/acpica/exconfig.c if (!table) { table 356 drivers/acpi/acpica/exconfig.c ACPI_CAST_PTR(u8, table)); table 358 drivers/acpi/acpica/exconfig.c ACPI_FREE(table); table 395 drivers/acpi/acpica/exconfig.c table = ACPI_ALLOCATE(length); table 396 drivers/acpi/acpica/exconfig.c if (!table) { table 400 drivers/acpi/acpica/exconfig.c memcpy(table, table_header, length); table 412 drivers/acpi/acpica/exconfig.c status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table), table 420 drivers/acpi/acpica/exconfig.c ACPI_FREE(table); table 47 drivers/acpi/acpica/nsparse.c struct acpi_table_header *table; table 56 drivers/acpi/acpica/nsparse.c status = acpi_get_table_by_index(table_index, &table); table 63 drivers/acpi/acpica/nsparse.c if (table->length < sizeof(struct acpi_table_header)) { table 67 drivers/acpi/acpica/nsparse.c aml_start = (u8 *)table + sizeof(struct acpi_table_header); table 68 drivers/acpi/acpica/nsparse.c aml_length = table->length - sizeof(struct acpi_table_header); table 92 drivers/acpi/acpica/nsparse.c ACPI_GET_FUNCTION_NAME, table->signature, table, table 157 drivers/acpi/acpica/nsparse.c struct acpi_table_header *table; table 162 drivers/acpi/acpica/nsparse.c status = acpi_get_table_by_index(table_index, &table); table 169 drivers/acpi/acpica/nsparse.c if (table->length < sizeof(struct acpi_table_header)) { table 173 drivers/acpi/acpica/nsparse.c aml_start = (u8 *)table + sizeof(struct acpi_table_header); table 174 drivers/acpi/acpica/nsparse.c aml_length = table->length - sizeof(struct acpi_table_header); table 206 drivers/acpi/acpica/nsparse.c if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) && table 477 drivers/acpi/acpica/nsxfname.c struct acpi_table_header *table = table 498 drivers/acpi/acpica/nsxfname.c if (!ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) && table 499 drivers/acpi/acpica/nsxfname.c !ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT)) { table 49 drivers/acpi/acpica/rsdump.c acpi_rs_dump_descriptor(void *resource, struct 
acpi_rsdump_info *table); table 175 drivers/acpi/acpica/rsdump.c acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table) table 184 drivers/acpi/acpica/rsdump.c count = table->offset; table 188 drivers/acpi/acpica/rsdump.c target = ACPI_ADD_PTR(u8, resource, table->offset); table 189 drivers/acpi/acpica/rsdump.c name = table->name; table 191 drivers/acpi/acpica/rsdump.c switch (table->opcode) { table 196 drivers/acpi/acpica/rsdump.c if (table->name) { table 206 drivers/acpi/acpica/rsdump.c ACPI_CAST_PTR(char, table->pointer)); table 218 drivers/acpi/acpica/rsdump.c if (table->pointer) { table 220 drivers/acpi/acpica/rsdump.c table->pointer[*target]); table 246 drivers/acpi/acpica/rsdump.c table->pointer[*target & 0x01]); table 252 drivers/acpi/acpica/rsdump.c table->pointer[*target & 0x03]); table 258 drivers/acpi/acpica/rsdump.c table->pointer[*target & 0x07]); table 365 drivers/acpi/acpica/rsdump.c table->opcode); table 369 drivers/acpi/acpica/rsdump.c table++; table 45 drivers/acpi/acpica/tbdata.c struct acpi_table_header *table; table 51 drivers/acpi/acpica/tbdata.c &table, &table_length, &table_flags); table 61 drivers/acpi/acpica/tbdata.c memcmp(table_desc->pointer, table, table_length)) ? table 66 drivers/acpi/acpica/tbdata.c acpi_tb_release_table(table, table_length, table_flags); table 88 drivers/acpi/acpica/tbdata.c u8 flags, struct acpi_table_header *table) table 97 drivers/acpi/acpica/tbdata.c table_desc->length = table->length; table 99 drivers/acpi/acpica/tbdata.c ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature); table 123 drivers/acpi/acpica/tbdata.c struct acpi_table_header *table = NULL; table 128 drivers/acpi/acpica/tbdata.c table = table 135 drivers/acpi/acpica/tbdata.c table = ACPI_CAST_PTR(struct acpi_table_header, table 147 drivers/acpi/acpica/tbdata.c if (!table) { table 153 drivers/acpi/acpica/tbdata.c *table_ptr = table; table 174 drivers/acpi/acpica/tbdata.c acpi_tb_release_table(struct acpi_table_header *table, table 181 drivers/acpi/acpica/tbdata.c acpi_os_unmap_memory(table, table_length); table 920 drivers/acpi/acpica/tbdata.c struct acpi_table_header *table; table 930 drivers/acpi/acpica/tbdata.c status = acpi_get_table_by_index(table_index, &table); table 949 drivers/acpi/acpica/tbdata.c acpi_tb_notify_table(ACPI_TABLE_EVENT_LOAD, table); table 1009 drivers/acpi/acpica/tbdata.c struct acpi_table_header *table; table 1021 drivers/acpi/acpica/tbdata.c status = acpi_get_table_by_index(table_index, &table); table 1023 drivers/acpi/acpica/tbdata.c acpi_tb_notify_table(ACPI_TABLE_EVENT_UNLOAD, table); table 1053 drivers/acpi/acpica/tbdata.c void acpi_tb_notify_table(u32 event, void *table) table 1058 drivers/acpi/acpica/tbdata.c (void)acpi_gbl_table_handler(event, table, table 279 drivers/acpi/acpica/tbfadt.c struct acpi_table_header *table; table 291 drivers/acpi/acpica/tbfadt.c status = acpi_tb_get_table(fadt_desc, &table); table 301 drivers/acpi/acpica/tbfadt.c (void)acpi_tb_verify_checksum(table, length); table 305 drivers/acpi/acpica/tbfadt.c acpi_tb_create_local_fadt(table, length); table 354 drivers/acpi/acpica/tbfadt.c void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) table 365 drivers/acpi/acpica/tbfadt.c table->revision, ACPI_FADT_CONFORMANCE, table 376 drivers/acpi/acpica/tbfadt.c memcpy(&acpi_gbl_FADT, table, table 201 drivers/acpi/acpica/tbinstal.c struct acpi_table_header *table; table 208 drivers/acpi/acpica/tbinstal.c status = acpi_os_table_override(old_table_desc->pointer, &table); table 
209 drivers/acpi/acpica/tbinstal.c if (ACPI_SUCCESS(status) && table) { table 211 drivers/acpi/acpica/tbinstal.c ACPI_PTR_TO_PHYSADDR(table), table 152 drivers/acpi/acpica/tbprint.c acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length) table 161 drivers/acpi/acpica/tbprint.c if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_S3PT) || table 162 drivers/acpi/acpica/tbprint.c ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_FACS)) { table 168 drivers/acpi/acpica/tbprint.c checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length); table 176 drivers/acpi/acpica/tbprint.c table->signature, table->checksum, table 177 drivers/acpi/acpica/tbprint.c (u8)(table->checksum - checksum))); table 228 drivers/acpi/acpica/tbutils.c struct acpi_table_header *table; table 274 drivers/acpi/acpica/tbutils.c table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); table 275 drivers/acpi/acpica/tbutils.c if (!table) { table 279 drivers/acpi/acpica/tbutils.c acpi_tb_print_table_header(address, table); table 285 drivers/acpi/acpica/tbutils.c length = table->length; table 286 drivers/acpi/acpica/tbutils.c acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); table 295 drivers/acpi/acpica/tbutils.c table = acpi_os_map_memory(address, length); table 296 drivers/acpi/acpica/tbutils.c if (!table) { table 302 drivers/acpi/acpica/tbutils.c status = acpi_tb_verify_checksum(table, length); table 304 drivers/acpi/acpica/tbutils.c acpi_os_unmap_memory(table, length); table 310 drivers/acpi/acpica/tbutils.c table_count = (u32)((table->length - sizeof(struct acpi_table_header)) / table 312 drivers/acpi/acpica/tbutils.c table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header)); table 347 drivers/acpi/acpica/tbutils.c acpi_os_unmap_memory(table, length); table 359 drivers/acpi/acpica/tbxface.c void acpi_put_table(struct acpi_table_header *table) table 366 drivers/acpi/acpica/tbxface.c if (!table) { table 377 drivers/acpi/acpica/tbxface.c if (table_desc->pointer != table) { table 106 drivers/acpi/acpica/tbxfload.c struct acpi_table_desc *table; table 118 drivers/acpi/acpica/tbxfload.c table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index]; table 121 drivers/acpi/acpica/tbxfload.c !ACPI_COMPARE_NAMESEG(table->signature.ascii, ACPI_SIG_DSDT) || table 122 drivers/acpi/acpica/tbxfload.c ACPI_FAILURE(acpi_tb_validate_table(table))) { table 133 drivers/acpi/acpica/tbxfload.c acpi_gbl_DSDT = table->pointer; table 170 drivers/acpi/acpica/tbxfload.c table = &acpi_gbl_root_table_list.tables[i]; table 172 drivers/acpi/acpica/tbxfload.c if (!table->address || table 174 drivers/acpi/acpica/tbxfload.c (table->signature.ascii, ACPI_SIG_SSDT) table 175 drivers/acpi/acpica/tbxfload.c && !ACPI_COMPARE_NAMESEG(table->signature.ascii, table 177 drivers/acpi/acpica/tbxfload.c && !ACPI_COMPARE_NAMESEG(table->signature.ascii, table 179 drivers/acpi/acpica/tbxfload.c || ACPI_FAILURE(acpi_tb_validate_table(table))) { table 191 drivers/acpi/acpica/tbxfload.c table->signature.ascii, table 192 drivers/acpi/acpica/tbxfload.c table->pointer->oem_table_id)); table 198 drivers/acpi/acpica/tbxfload.c table->signature.ascii, table 199 drivers/acpi/acpica/tbxfload.c table->pointer->oem_table_id)); table 281 drivers/acpi/acpica/tbxfload.c acpi_status acpi_load_table(struct acpi_table_header *table) table 290 drivers/acpi/acpica/tbxfload.c if (!table) { table 297 drivers/acpi/acpica/tbxfload.c status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table), table 57 
drivers/acpi/acpica/utmisc.c u8 acpi_ut_is_aml_table(struct acpi_table_header *table) table 62 drivers/acpi/acpica/utmisc.c if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) || table 63 drivers/acpi/acpica/utmisc.c ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_PSDT) || table 64 drivers/acpi/acpica/utmisc.c ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT) || table 65 drivers/acpi/acpica/utmisc.c ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) || table 66 drivers/acpi/acpica/utmisc.c ACPI_IS_OEM_SIG(table->signature)) { table 155 drivers/acpi/arm64/gtdt.c int __init acpi_gtdt_init(struct acpi_table_header *table, table 161 drivers/acpi/arm64/gtdt.c gtdt = container_of(table, struct acpi_table_gtdt, header); table 163 drivers/acpi/arm64/gtdt.c acpi_gtdt_desc.gtdt_end = (void *)table + table->length; table 168 drivers/acpi/arm64/gtdt.c if (table->revision < 2) { table 170 drivers/acpi/arm64/gtdt.c table->revision); table 180 drivers/acpi/arm64/gtdt.c if (platform_timer < (void *)table + sizeof(struct acpi_table_gtdt)) { table 377 drivers/acpi/arm64/gtdt.c struct acpi_table_header *table; table 383 drivers/acpi/arm64/gtdt.c if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_GTDT, 0, &table))) table 395 drivers/acpi/arm64/gtdt.c ret = acpi_gtdt_init(table, &timer_count); table 81 drivers/acpi/bgrt.c int __init acpi_parse_bgrt(struct acpi_table_header *table) table 83 drivers/acpi/bgrt.c efi_bgrt_init(table); table 1113 drivers/acpi/bus.c static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context) table 1115 drivers/acpi/bus.c acpi_scan_table_handler(event, table, context); table 1117 drivers/acpi/bus.c return acpi_sysfs_table_handler(event, table, context); table 31 drivers/acpi/custom_method.c struct acpi_table_header table; table 43 drivers/acpi/custom_method.c if (copy_from_user(&table, user_buf, table 46 drivers/acpi/custom_method.c uncopied_bytes = max_size = table.length; table 88 drivers/acpi/internal.h acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context); table 89 drivers/acpi/internal.h void acpi_scan_table_handler(u32 event, void *table, void *context); table 926 drivers/acpi/nfit/core.c struct nfit_table_prev *prev, void *table, const void *end) table 932 drivers/acpi/nfit/core.c if (table >= end) table 935 drivers/acpi/nfit/core.c hdr = table; table 944 drivers/acpi/nfit/core.c if (!add_spa(acpi_desc, prev, table)) table 948 drivers/acpi/nfit/core.c if (!add_memdev(acpi_desc, prev, table)) table 952 drivers/acpi/nfit/core.c if (!add_dcr(acpi_desc, prev, table)) table 956 drivers/acpi/nfit/core.c if (!add_bdw(acpi_desc, prev, table)) table 960 drivers/acpi/nfit/core.c if (!add_idt(acpi_desc, prev, table)) table 964 drivers/acpi/nfit/core.c if (!add_flush(acpi_desc, prev, table)) table 971 drivers/acpi/nfit/core.c if (!add_platform_cap(acpi_desc, table)) table 979 drivers/acpi/nfit/core.c return table + hdr->length; table 3579 drivers/acpi/nfit/core.c static void acpi_nfit_put_table(void *table) table 3581 drivers/acpi/nfit/core.c acpi_put_table(table); table 308 drivers/acpi/numa.c static int __init acpi_parse_slit(struct acpi_table_header *table) table 310 drivers/acpi/numa.c struct acpi_table_slit *slit = (struct acpi_table_slit *)table; table 401 drivers/acpi/numa.c static int __init acpi_parse_srat(struct acpi_table_header *table) table 403 drivers/acpi/numa.c struct acpi_table_srat *srat = (struct acpi_table_srat *)table; table 147 drivers/acpi/pci_root.c struct pci_osc_bit_struct *table, int size) table 154 
drivers/acpi/pci_root.c for (i = 0, entry = table; i < size; i++, entry++) table 34 drivers/acpi/pmic/intel_pmic.c static int pmic_get_reg_bit(int address, struct pmic_table *table, table 40 drivers/acpi/pmic/intel_pmic.c if (table[i].address == address) { table 41 drivers/acpi/pmic/intel_pmic.c *reg = table[i].reg; table 43 drivers/acpi/pmic/intel_pmic.c *bit = table[i].bit; table 198 drivers/acpi/pmic/tps68470_pmic.c const struct tps68470_pmic_table *table, table 211 drivers/acpi/pmic/tps68470_pmic.c *reg = table[i].reg; table 212 drivers/acpi/pmic/tps68470_pmic.c *bitmask = table[i].bitmask; table 409 drivers/acpi/pptt.c static void cache_setup_acpi_cpu(struct acpi_table_header *table, table 421 drivers/acpi/pptt.c found_cache = acpi_find_cache_node(table, acpi_cpu_id, table 498 drivers/acpi/pptt.c static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, table 504 drivers/acpi/pptt.c cpu_node = acpi_find_processor_node(table, acpi_cpu_id); table 506 drivers/acpi/pptt.c cpu_node = acpi_find_processor_tag(table, cpu_node, table 517 drivers/acpi/pptt.c return ACPI_PTR_DIFF(cpu_node, table); table 526 drivers/acpi/pptt.c struct acpi_table_header *table; table 530 drivers/acpi/pptt.c status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); table 535 drivers/acpi/pptt.c retval = topology_get_acpi_cpu_tag(table, cpu, level, flag); table 538 drivers/acpi/pptt.c acpi_put_table(table); table 558 drivers/acpi/pptt.c struct acpi_table_header *table; table 564 drivers/acpi/pptt.c status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); table 570 drivers/acpi/pptt.c if (table->revision >= rev) table 571 drivers/acpi/pptt.c cpu_node = acpi_find_processor_node(table, acpi_cpu_id); table 576 drivers/acpi/pptt.c acpi_put_table(table); table 594 drivers/acpi/pptt.c struct acpi_table_header *table; table 601 drivers/acpi/pptt.c status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); table 605 drivers/acpi/pptt.c number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id); table 606 drivers/acpi/pptt.c acpi_put_table(table); table 628 drivers/acpi/pptt.c struct acpi_table_header *table; table 633 drivers/acpi/pptt.c status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); table 639 drivers/acpi/pptt.c cache_setup_acpi_cpu(table, cpu); table 640 drivers/acpi/pptt.c acpi_put_table(table); table 694 drivers/acpi/pptt.c struct acpi_table_header *table; table 701 drivers/acpi/pptt.c status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); table 707 drivers/acpi/pptt.c found_cache = acpi_find_cache_node(table, acpi_cpu_id, table 712 drivers/acpi/pptt.c ret = ACPI_PTR_DIFF(cpu_node, table); table 714 drivers/acpi/pptt.c acpi_put_table(table); table 2296 drivers/acpi/scan.c void *table; table 2315 drivers/acpi/scan.c void acpi_scan_table_handler(u32 event, void *table, void *context) table 2330 drivers/acpi/scan.c tew->table = table; table 90 drivers/acpi/spcr.c struct acpi_table_spcr *table; table 101 drivers/acpi/spcr.c (struct acpi_table_header **)&table); table 106 drivers/acpi/spcr.c if (table->header.revision < 2) table 107 drivers/acpi/spcr.c pr_info("SPCR table version %d\n", table->header.revision); table 109 drivers/acpi/spcr.c if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { table 111 drivers/acpi/spcr.c table->serial_port.access_width))) { table 128 drivers/acpi/spcr.c switch (table->interface_type) { table 146 drivers/acpi/spcr.c switch (table->baud_rate) { table 189 drivers/acpi/spcr.c if (qdf2400_erratum_44_present(&table->header)) { table 195 drivers/acpi/spcr.c if 
(xgene_8250_erratum_present(table)) { table 207 drivers/acpi/spcr.c table->serial_port.address); table 210 drivers/acpi/spcr.c table->serial_port.address, baud_rate); table 223 drivers/acpi/spcr.c acpi_put_table((struct acpi_table_header *)table); table 403 drivers/acpi/sysfs.c acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context) table 415 drivers/acpi/sysfs.c table_attr, table)) { table 425 drivers/acpi/tables.c struct acpi_table_header *table = NULL; table 434 drivers/acpi/tables.c acpi_get_table(id, acpi_apic_instance, &table); table 436 drivers/acpi/tables.c acpi_get_table(id, 0, &table); table 438 drivers/acpi/tables.c if (table) { table 439 drivers/acpi/tables.c handler(table); table 440 drivers/acpi/tables.c acpi_put_table(table); table 453 drivers/acpi/tables.c struct acpi_table_header *table = NULL; table 455 drivers/acpi/tables.c acpi_get_table(ACPI_SIG_MADT, 2, &table); table 456 drivers/acpi/tables.c if (table) { table 462 drivers/acpi/tables.c acpi_put_table(table); table 470 drivers/acpi/tables.c static void acpi_table_taint(struct acpi_table_header *table) table 473 drivers/acpi/tables.c table->signature, table->oem_table_id); table 520 drivers/acpi/tables.c struct acpi_table_header *table; table 549 drivers/acpi/tables.c table = file.data; table 552 drivers/acpi/tables.c if (!memcmp(table->signature, table_sigs[sig], 4)) table 560 drivers/acpi/tables.c if (file.size != table->length) { table 565 drivers/acpi/tables.c if (acpi_table_checksum(file.data, table->length)) { table 572 drivers/acpi/tables.c table->signature, cpio_path, file.name, table->length); table 574 drivers/acpi/tables.c all_tables_size += table->length; table 643 drivers/acpi/tables.c struct acpi_table_header *table; table 652 drivers/acpi/tables.c table = acpi_os_map_memory(acpi_tables_addr + table_offset, table 654 drivers/acpi/tables.c if (table_offset + table->length > all_tables_size) { table 655 drivers/acpi/tables.c acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); table 660 drivers/acpi/tables.c table_length = table->length; table 663 drivers/acpi/tables.c if (memcmp(existing_table->signature, table->signature, 4) || table 664 drivers/acpi/tables.c memcmp(table->oem_id, existing_table->oem_id, table 666 drivers/acpi/tables.c memcmp(table->oem_table_id, existing_table->oem_table_id, table 668 drivers/acpi/tables.c acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); table 676 drivers/acpi/tables.c existing_table->oem_revision >= table->oem_revision) { table 677 drivers/acpi/tables.c acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); table 684 drivers/acpi/tables.c table->signature, table->oem_id, table 685 drivers/acpi/tables.c table->oem_table_id); table 686 drivers/acpi/tables.c acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); table 701 drivers/acpi/tables.c struct acpi_table_header *table; table 707 drivers/acpi/tables.c table = acpi_os_map_memory(acpi_tables_addr + table_offset, table 709 drivers/acpi/tables.c if (table_offset + table->length > all_tables_size) { table 710 drivers/acpi/tables.c acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); table 715 drivers/acpi/tables.c table_length = table->length; table 718 drivers/acpi/tables.c if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_RSDT) || table 719 drivers/acpi/tables.c ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_XSDT)) { table 720 drivers/acpi/tables.c acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); table 729 drivers/acpi/tables.c acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); table 734 drivers/acpi/tables.c table->signature, 
table->oem_id, table 735 drivers/acpi/tables.c table->oem_table_id); table 736 drivers/acpi/tables.c acpi_os_unmap_memory(table, ACPI_HEADER_SIZE); table 851 drivers/acpi/utils.c if (ACPI_FAILURE(acpi_get_table_header(plat->table, 0, &hdr))) table 29 drivers/amba/bus.c amba_cs_uci_id_match(const struct amba_id *table, struct amba_device *dev) table 34 drivers/amba/bus.c uci = table->data; table 47 drivers/amba/bus.c amba_lookup(const struct amba_id *table, struct amba_device *dev) table 49 drivers/amba/bus.c while (table->mask) { table 50 drivers/amba/bus.c if (((dev->periphid & table->mask) == table->id) && table 52 drivers/amba/bus.c (amba_cs_uci_id_match(table, dev)))) table 53 drivers/amba/bus.c return table; table 54 drivers/amba/bus.c table++; table 96 drivers/ata/libata-transport.c #define ata_bitfield_name_match(title, table) \ table 104 drivers/ata/libata-transport.c for (i = 0; i < ARRAY_SIZE(table); i++) { \ table 105 drivers/ata/libata-transport.c if (table[i].value & table_key) { \ table 107 drivers/ata/libata-transport.c prefix, table[i].name); \ table 115 drivers/ata/libata-transport.c #define ata_bitfield_name_search(title, table) \ table 122 drivers/ata/libata-transport.c for (i = 0; i < ARRAY_SIZE(table); i++) { \ table 123 drivers/ata/libata-transport.c if (table[i].value == table_key) { \ table 125 drivers/ata/libata-transport.c table[i].name); \ table 519 drivers/ata/pata_macio.c struct dbdma_cmd *table; table 528 drivers/ata/pata_macio.c table = (struct dbdma_cmd *) priv->dma_table_cpu; table 546 drivers/ata/pata_macio.c table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE); table 547 drivers/ata/pata_macio.c table->req_count = cpu_to_le16(len); table 548 drivers/ata/pata_macio.c table->phy_addr = cpu_to_le32(addr); table 549 drivers/ata/pata_macio.c table->cmd_dep = 0; table 550 drivers/ata/pata_macio.c table->xfer_status = 0; table 551 drivers/ata/pata_macio.c table->res_count = 0; table 554 drivers/ata/pata_macio.c ++table; table 562 drivers/ata/pata_macio.c table--; table 563 drivers/ata/pata_macio.c table->command = cpu_to_le16(write ? 
OUTPUT_LAST: INPUT_LAST); table 564 drivers/ata/pata_macio.c table++; table 567 drivers/ata/pata_macio.c memset(table, 0, sizeof(struct dbdma_cmd)); table 568 drivers/ata/pata_macio.c table->command = cpu_to_le16(DBDMA_STOP); table 226 drivers/base/devcoredump.c struct scatterlist *table = data; table 233 drivers/base/devcoredump.c return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len, table 329 drivers/base/devcoredump.c void dev_coredumpsg(struct device *dev, struct scatterlist *table, table 332 drivers/base/devcoredump.c dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable, table 74 drivers/base/regmap/regmap.c const struct regmap_access_table *table) table 77 drivers/base/regmap/regmap.c if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges)) table 81 drivers/base/regmap/regmap.c if (!table->n_yes_ranges) table 84 drivers/base/regmap/regmap.c return regmap_reg_in_ranges(reg, table->yes_ranges, table 85 drivers/base/regmap/regmap.c table->n_yes_ranges); table 61 drivers/block/zram/zram_drv.c return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags); table 66 drivers/block/zram/zram_drv.c bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags); table 71 drivers/block/zram/zram_drv.c bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); table 86 drivers/block/zram/zram_drv.c return zram->table[index].handle; table 91 drivers/block/zram/zram_drv.c zram->table[index].handle = handle; table 98 drivers/block/zram/zram_drv.c return zram->table[index].flags & BIT(flag); table 104 drivers/block/zram/zram_drv.c zram->table[index].flags |= BIT(flag); table 110 drivers/block/zram/zram_drv.c zram->table[index].flags &= ~BIT(flag); table 116 drivers/block/zram/zram_drv.c zram->table[index].element = element; table 121 drivers/block/zram/zram_drv.c return zram->table[index].element; table 126 drivers/block/zram/zram_drv.c return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); table 132 drivers/block/zram/zram_drv.c unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; table 134 drivers/block/zram/zram_drv.c zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; table 859 drivers/block/zram/zram_drv.c zram->table[index].ac_time = ktime_get_boottime(); table 889 drivers/block/zram/zram_drv.c ts = ktime_to_timespec64(zram->table[index].ac_time); table 1137 drivers/block/zram/zram_drv.c vfree(zram->table); table 1145 drivers/block/zram/zram_drv.c zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table))); table 1146 drivers/block/zram/zram_drv.c if (!zram->table) table 1151 drivers/block/zram/zram_drv.c vfree(zram->table); table 1170 drivers/block/zram/zram_drv.c zram->table[index].ac_time = 0; table 1208 drivers/block/zram/zram_drv.c WARN_ON_ONCE(zram->table[index].flags & table 93 drivers/block/zram/zram_drv.h struct zram_table_entry *table; table 46 drivers/bus/mips_cdmm.c mips_cdmm_lookup(const struct mips_cdmm_device_id *table, table 51 drivers/bus/mips_cdmm.c for (; table->type; ++table) { table 52 drivers/bus/mips_cdmm.c ret = (dev->type == table->type); table 57 drivers/bus/mips_cdmm.c return ret ? 
table : NULL; table 851 drivers/char/agp/generic.c char *table; table 864 drivers/char/agp/generic.c table = NULL; table 897 drivers/char/agp/generic.c table = alloc_gatt_pages(page_order); table 899 drivers/char/agp/generic.c if (table == NULL) { table 921 drivers/char/agp/generic.c } while (!table && (i < bridge->driver->num_aperture_sizes)); table 926 drivers/char/agp/generic.c table = alloc_gatt_pages(page_order); table 929 drivers/char/agp/generic.c if (table == NULL) table 932 drivers/char/agp/generic.c table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); table 934 drivers/char/agp/generic.c for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) table 937 drivers/char/agp/generic.c bridge->gatt_table_real = (u32 *) table; table 938 drivers/char/agp/generic.c agp_gatt_table = (void *)table; table 942 drivers/char/agp/generic.c if (set_memory_uc((unsigned long)table, 1 << page_order)) table 945 drivers/char/agp/generic.c bridge->gatt_table = (u32 __iomem *)table; table 947 drivers/char/agp/generic.c bridge->gatt_table = ioremap_nocache(virt_to_phys(table), table 953 drivers/char/agp/generic.c for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) table 956 drivers/char/agp/generic.c free_gatt_pages(table, page_order); table 975 drivers/char/agp/generic.c char *table, *table_end; table 1011 drivers/char/agp/generic.c table = (char *) bridge->gatt_table_real; table 1012 drivers/char/agp/generic.c table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); table 1014 drivers/char/agp/generic.c for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) table 370 drivers/char/agp/uninorth-agp.c char *table; table 383 drivers/char/agp/uninorth-agp.c table = NULL; table 393 drivers/char/agp/uninorth-agp.c table = (char *) __get_free_pages(GFP_KERNEL, page_order); table 395 drivers/char/agp/uninorth-agp.c if (table == NULL) { table 401 drivers/char/agp/uninorth-agp.c } while (!table && (i < bridge->driver->num_aperture_sizes)); table 403 drivers/char/agp/uninorth-agp.c if (table == NULL) table 412 drivers/char/agp/uninorth-agp.c table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); table 414 drivers/char/agp/uninorth-agp.c for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end); table 420 drivers/char/agp/uninorth-agp.c bridge->gatt_table_real = (u32 *) table; table 422 drivers/char/agp/uninorth-agp.c flush_dcache_range((unsigned long)table, table 429 drivers/char/agp/uninorth-agp.c bridge->gatt_bus_addr = virt_to_phys(table); table 443 drivers/char/agp/uninorth-agp.c if (table) table 444 drivers/char/agp/uninorth-agp.c free_pages((unsigned long)table, page_order); table 451 drivers/char/agp/uninorth-agp.c char *table, *table_end; table 465 drivers/char/agp/uninorth-agp.c table = (char *) bridge->gatt_table_real; table 466 drivers/char/agp/uninorth-agp.c table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); table 468 drivers/char/agp/uninorth-agp.c for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) table 2231 drivers/char/random.c static int proc_do_uuid(struct ctl_table *table, int write, table 2237 drivers/char/random.c uuid = table->data; table 2261 drivers/char/random.c static int proc_do_entropy(struct ctl_table *table, int write, table 2267 drivers/char/random.c entropy_count = *(int *)table->data >> ENTROPY_SHIFT; table 22 drivers/clk/actions/owl-divider.c div_hw->table, div_hw->width, table 47 drivers/clk/actions/owl-divider.c val, div_hw->table, table 69 
drivers/clk/actions/owl-divider.c val = divider_get_val(rate, parent_rate, div_hw->table, table 21 drivers/clk/actions/owl-divider.h struct clk_div_table *table; table 35 drivers/clk/actions/owl-divider.h .table = _table, \ table 17 drivers/clk/actions/owl-factor.c static unsigned int _get_table_maxval(const struct clk_factor_table *table) table 22 drivers/clk/actions/owl-factor.c for (clkt = table; clkt->div; clkt++) table 28 drivers/clk/actions/owl-factor.c static int _get_table_div_mul(const struct clk_factor_table *table, table 33 drivers/clk/actions/owl-factor.c for (clkt = table; clkt->div; clkt++) { table 44 drivers/clk/actions/owl-factor.c static unsigned int _get_table_val(const struct clk_factor_table *table, table 51 drivers/clk/actions/owl-factor.c for (clkt = table; clkt->div; clkt++) { table 62 drivers/clk/actions/owl-factor.c val = _get_table_maxval(table); table 71 drivers/clk/actions/owl-factor.c const struct clk_factor_table *clkt = factor_hw->table; table 85 drivers/clk/actions/owl-factor.c for (clkt = factor_hw->table; clkt->div; clkt++) { table 125 drivers/clk/actions/owl-factor.c const struct clk_factor_table *clkt = factor_hw->table; table 148 drivers/clk/actions/owl-factor.c const struct clk_factor_table *clkt = factor_hw->table; table 191 drivers/clk/actions/owl-factor.c val = _get_table_val(factor_hw->table, rate, parent_rate); table 27 drivers/clk/actions/owl-factor.h struct clk_factor_table *table; table 41 drivers/clk/actions/owl-factor.h .table = _table, \ table 31 drivers/clk/actions/owl-pll.c static unsigned long _get_table_rate(const struct clk_pll_table *table, table 36 drivers/clk/actions/owl-pll.c for (clkt = table; clkt->rate; clkt++) table 44 drivers/clk/actions/owl-pll.c const struct clk_pll_table *table, unsigned long rate) table 48 drivers/clk/actions/owl-pll.c for (clkt = table; clkt->rate; clkt++) { table 50 drivers/clk/actions/owl-pll.c table = clkt; table 53 drivers/clk/actions/owl-pll.c table = clkt; table 56 drivers/clk/actions/owl-pll.c return table; table 67 drivers/clk/actions/owl-pll.c if (pll_hw->table) { table 68 drivers/clk/actions/owl-pll.c clkt = _get_pll_table(pll_hw->table, rate); table 89 drivers/clk/actions/owl-pll.c if (pll_hw->table) { table 95 drivers/clk/actions/owl-pll.c return _get_table_rate(pll_hw->table, val); table 168 drivers/clk/actions/owl-pll.c if (pll_hw->table) { table 169 drivers/clk/actions/owl-pll.c clkt = _get_pll_table(pll_hw->table, rate); table 33 drivers/clk/actions/owl-pll.h const struct clk_pll_table *table; table 52 drivers/clk/actions/owl-pll.h .table = _table, \ table 1364 drivers/clk/bcm/clk-bcm2835.c divider->div.table = NULL; table 147 drivers/clk/bcm/clk-bcm63xx-gate.c const struct clk_bcm63xx_table_entry *entry, *table; table 152 drivers/clk/bcm/clk-bcm63xx-gate.c table = of_device_get_match_data(&pdev->dev); table 153 drivers/clk/bcm/clk-bcm63xx-gate.c if (!table) table 156 drivers/clk/bcm/clk-bcm63xx-gate.c for (entry = table; entry->name; entry++) table 176 drivers/clk/bcm/clk-bcm63xx-gate.c for (entry = table; entry->name; entry++) { table 81 drivers/clk/clk-asm9260.c u32 *table; table 293 drivers/clk/clk-asm9260.c 0, mc->mask, 0, mc->table, &asm9260_clk_lock); table 44 drivers/clk/clk-divider.c static unsigned int _get_table_maxdiv(const struct clk_div_table *table, table 50 drivers/clk/clk-divider.c for (clkt = table; clkt->div; clkt++) table 56 drivers/clk/clk-divider.c static unsigned int _get_table_mindiv(const struct clk_div_table *table) table 61 drivers/clk/clk-divider.c for (clkt = 
table; clkt->div; clkt++) table 67 drivers/clk/clk-divider.c static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width, table 74 drivers/clk/clk-divider.c if (table) table 75 drivers/clk/clk-divider.c return _get_table_maxdiv(table, width); table 79 drivers/clk/clk-divider.c static unsigned int _get_table_div(const struct clk_div_table *table, table 84 drivers/clk/clk-divider.c for (clkt = table; clkt->div; clkt++) table 90 drivers/clk/clk-divider.c static unsigned int _get_div(const struct clk_div_table *table, table 99 drivers/clk/clk-divider.c if (table) table 100 drivers/clk/clk-divider.c return _get_table_div(table, val); table 104 drivers/clk/clk-divider.c static unsigned int _get_table_val(const struct clk_div_table *table, table 109 drivers/clk/clk-divider.c for (clkt = table; clkt->div; clkt++) table 115 drivers/clk/clk-divider.c static unsigned int _get_val(const struct clk_div_table *table, table 124 drivers/clk/clk-divider.c if (table) table 125 drivers/clk/clk-divider.c return _get_table_val(table, div); table 131 drivers/clk/clk-divider.c const struct clk_div_table *table, table 136 drivers/clk/clk-divider.c div = _get_div(table, val, flags, width); table 157 drivers/clk/clk-divider.c return divider_recalc_rate(hw, parent_rate, val, divider->table, table 161 drivers/clk/clk-divider.c static bool _is_valid_table_div(const struct clk_div_table *table, table 166 drivers/clk/clk-divider.c for (clkt = table; clkt->div; clkt++) table 172 drivers/clk/clk-divider.c static bool _is_valid_div(const struct clk_div_table *table, unsigned int div, table 177 drivers/clk/clk-divider.c if (table) table 178 drivers/clk/clk-divider.c return _is_valid_table_div(table, div); table 182 drivers/clk/clk-divider.c static int _round_up_table(const struct clk_div_table *table, int div) table 187 drivers/clk/clk-divider.c for (clkt = table; clkt->div; clkt++) { table 200 drivers/clk/clk-divider.c static int _round_down_table(const struct clk_div_table *table, int div) table 203 drivers/clk/clk-divider.c int down = _get_table_mindiv(table); table 205 drivers/clk/clk-divider.c for (clkt = table; clkt->div; clkt++) { table 218 drivers/clk/clk-divider.c static int _div_round_up(const struct clk_div_table *table, table 226 drivers/clk/clk-divider.c if (table) table 227 drivers/clk/clk-divider.c div = _round_up_table(table, div); table 232 drivers/clk/clk-divider.c static int _div_round_closest(const struct clk_div_table *table, table 245 drivers/clk/clk-divider.c } else if (table) { table 246 drivers/clk/clk-divider.c up = _round_up_table(table, up); table 247 drivers/clk/clk-divider.c down = _round_down_table(table, down); table 256 drivers/clk/clk-divider.c static int _div_round(const struct clk_div_table *table, table 261 drivers/clk/clk-divider.c return _div_round_closest(table, parent_rate, rate, flags); table 263 drivers/clk/clk-divider.c return _div_round_up(table, parent_rate, rate, flags); table 275 drivers/clk/clk-divider.c static int _next_div(const struct clk_div_table *table, int div, table 282 drivers/clk/clk-divider.c if (table) table 283 drivers/clk/clk-divider.c return _round_up_table(table, div); table 291 drivers/clk/clk-divider.c const struct clk_div_table *table, u8 width, table 301 drivers/clk/clk-divider.c maxdiv = _get_maxdiv(table, width, flags); table 305 drivers/clk/clk-divider.c bestdiv = _div_round(table, parent_rate, rate, flags); table 317 drivers/clk/clk-divider.c for (i = _next_div(table, 0, flags); i <= maxdiv; table 318 drivers/clk/clk-divider.c i = 
_next_div(table, i, flags)) { table 338 drivers/clk/clk-divider.c bestdiv = _get_maxdiv(table, width, flags); table 347 drivers/clk/clk-divider.c const struct clk_div_table *table, table 352 drivers/clk/clk-divider.c div = clk_divider_bestdiv(hw, parent, rate, prate, table, width, flags); table 360 drivers/clk/clk-divider.c const struct clk_div_table *table, u8 width, table 365 drivers/clk/clk-divider.c div = _get_div(table, val, flags, width); table 392 drivers/clk/clk-divider.c return divider_ro_round_rate(hw, rate, prate, divider->table, table 397 drivers/clk/clk-divider.c return divider_round_rate(hw, rate, prate, divider->table, table 402 drivers/clk/clk-divider.c const struct clk_div_table *table, u8 width, table 409 drivers/clk/clk-divider.c if (!_is_valid_div(table, div, flags)) table 412 drivers/clk/clk-divider.c value = _get_val(table, div, flags, width); table 426 drivers/clk/clk-divider.c value = divider_get_val(rate, parent_rate, divider->table, table 469 drivers/clk/clk-divider.c u8 clk_divider_flags, const struct clk_div_table *table, table 505 drivers/clk/clk-divider.c div->table = table; table 584 drivers/clk/clk-divider.c u8 clk_divider_flags, const struct clk_div_table *table, table 590 drivers/clk/clk-divider.c width, clk_divider_flags, table, lock); table 614 drivers/clk/clk-divider.c u8 clk_divider_flags, const struct clk_div_table *table, table 618 drivers/clk/clk-divider.c width, clk_divider_flags, table, lock); table 75 drivers/clk/clk-milbeaut.c const struct clk_div_table *table; table 95 drivers/clk/clk-milbeaut.c u32 *table; table 288 drivers/clk/clk-milbeaut.c return clk_mux_val_to_index(hw, mux->table, mux->flags, val); table 294 drivers/clk/clk-milbeaut.c u32 val = clk_mux_index_to_val(mux->table, mux->flags, index); table 328 drivers/clk/clk-milbeaut.c u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, table 351 drivers/clk/clk-milbeaut.c mux->table = table; table 371 drivers/clk/clk-milbeaut.c const struct clk_div_table *table; table 385 drivers/clk/clk-milbeaut.c return divider_recalc_rate(hw, parent_rate, val, divider->table, table 401 drivers/clk/clk-milbeaut.c return divider_ro_round_rate(hw, rate, prate, divider->table, table 406 drivers/clk/clk-milbeaut.c return divider_round_rate(hw, rate, prate, divider->table, table 419 drivers/clk/clk-milbeaut.c value = divider_get_val(rate, parent_rate, divider->table, table 460 drivers/clk/clk-milbeaut.c u8 clk_divider_flags, const struct clk_div_table *table, table 484 drivers/clk/clk-milbeaut.c div->table = table; table 520 drivers/clk/clk-milbeaut.c factors->table, table 554 drivers/clk/clk-milbeaut.c factors->table, &m10v_crglock); table 42 drivers/clk/clk-mux.c int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, table 47 drivers/clk/clk-mux.c if (table) { table 51 drivers/clk/clk-mux.c if (table[i] == val) table 69 drivers/clk/clk-mux.c unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index) table 73 drivers/clk/clk-mux.c if (table) { table 74 drivers/clk/clk-mux.c val = table[index]; table 95 drivers/clk/clk-mux.c return clk_mux_val_to_index(hw, mux->table, mux->flags, val); table 101 drivers/clk/clk-mux.c u32 val = clk_mux_index_to_val(mux->table, mux->flags, index); table 152 drivers/clk/clk-mux.c u8 clk_mux_flags, u32 *table, spinlock_t *lock) table 188 drivers/clk/clk-mux.c mux->table = table; table 206 drivers/clk/clk-mux.c u8 clk_mux_flags, u32 *table, spinlock_t *lock) table 212 drivers/clk/clk-mux.c table, lock); table 149 drivers/clk/clk-npcm7xx.c u32 
*table; table 610 drivers/clk/clk-npcm7xx.c mux_data->table, &npcm7xx_clk_lock); table 748 drivers/clk/clk-stm32f4.c u8 clk_divider_flags, const struct clk_div_table *table, table 773 drivers/clk/clk-stm32f4.c pll_div->div.table = table; table 853 drivers/clk/clk-stm32f4.c u64 table[MAX_GATE_MAP]; table 861 drivers/clk/clk-stm32f4.c memcpy(table, stm32f4_gate_map, sizeof(table)); table 864 drivers/clk/clk-stm32f4.c if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) || table 865 drivers/clk/clk-stm32f4.c 0 == (table[BIT_ULL_WORD(secondary)] & table 870 drivers/clk/clk-stm32f4.c table[BIT_ULL_WORD(secondary)] &= table 873 drivers/clk/clk-stm32f4.c return stm32fx_end_primary_clk - 1 + hweight64(table[0]) + table 874 drivers/clk/clk-stm32f4.c (BIT_ULL_WORD(secondary) >= 1 ? hweight64(table[1]) : 0) + table 875 drivers/clk/clk-stm32f4.c (BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0); table 344 drivers/clk/clk-stm32mp1.c const struct clk_div_table *table; table 352 drivers/clk/clk-stm32mp1.c u32 *table; table 426 drivers/clk/clk-stm32mp1.c div_cfg->table, table 489 drivers/clk/clk-stm32mp1.c mmux->mux.table = cfg->mux->table; table 504 drivers/clk/clk-stm32mp1.c mux->table = cfg->mux->table; table 527 drivers/clk/clk-stm32mp1.c div->table = cfg->div->table; table 1131 drivers/clk/clk-stm32mp1.c .table = _div_table,\ table 1237 drivers/clk/clk-stm32mp1.c .table = _div_table,\ table 1253 drivers/clk/clk-stm32mp1.c .table = NULL,\ table 1604 drivers/clk/clk-stm32mp1.c .table = NULL,\ table 85 drivers/clk/davinci/pll-da850.c .table = da850_pll0_obsclk_table, table 194 drivers/clk/davinci/pll-da850.c .table = da850_pll1_obsclk_table, table 56 drivers/clk/davinci/pll-dm365.c .table = dm365_pll_obsclk_table, table 119 drivers/clk/davinci/pll-dm365.c .table = dm365_pll_obsclk_table, table 589 drivers/clk/davinci/pll.c mux->table = info->table; table 91 drivers/clk/davinci/pll.h u32 *table; table 165 drivers/clk/hisilicon/clk.c clks[i].table, &hisi_clk_lock); table 226 drivers/clk/hisilicon/clk.c clks[i].table, table 53 drivers/clk/hisilicon/clk.h u32 *table; table 79 drivers/clk/hisilicon/clk.h struct clk_div_table *table; table 38 drivers/clk/hisilicon/clkdivider-hi6220.c const struct clk_div_table *table; table 54 drivers/clk/hisilicon/clkdivider-hi6220.c return divider_recalc_rate(hw, parent_rate, val, dclk->table, table 63 drivers/clk/hisilicon/clkdivider-hi6220.c return divider_round_rate(hw, rate, prate, dclk->table, table 75 drivers/clk/hisilicon/clkdivider-hi6220.c value = divider_get_val(rate, parent_rate, dclk->table, table 107 drivers/clk/hisilicon/clkdivider-hi6220.c struct clk_div_table *table; table 120 drivers/clk/hisilicon/clkdivider-hi6220.c table = kcalloc(max_div + 1, sizeof(*table), GFP_KERNEL); table 121 drivers/clk/hisilicon/clkdivider-hi6220.c if (!table) { table 127 drivers/clk/hisilicon/clkdivider-hi6220.c table[i].div = min_div + i; table 128 drivers/clk/hisilicon/clkdivider-hi6220.c table[i].val = table[i].div - 1; table 144 drivers/clk/hisilicon/clkdivider-hi6220.c div->table = table; table 149 drivers/clk/hisilicon/clkdivider-hi6220.c kfree(table); table 37 drivers/clk/imx/clk-divider-gate.c return divider_recalc_rate(hw, parent_rate, val, div->table, table 63 drivers/clk/imx/clk-divider-gate.c return divider_recalc_rate(hw, parent_rate, val, div->table, table 82 drivers/clk/imx/clk-divider-gate.c value = divider_get_val(rate, parent_rate, div->table, table 179 drivers/clk/imx/clk-divider-gate.c const struct clk_div_table *table, table 205 
drivers/clk/imx/clk-divider-gate.c div_gate->divider.table = table; table 471 drivers/clk/imx/clk.h u8 clk_divider_flags, const struct clk_div_table *table, table 126 drivers/clk/meson/axg-aoclk.c .table = axg_32k_div_table, table 218 drivers/clk/meson/axg.c .table = axg_gp0_pll_params_table, table 290 drivers/clk/meson/axg.c .table = axg_gp0_pll_params_table, table 743 drivers/clk/meson/axg.c .table = axg_pcie_pll_params_table, table 799 drivers/clk/meson/axg.c .table = (u32[]){ 1 }, table 816 drivers/clk/meson/axg.c .table = (u32[]){ 1 }, table 872 drivers/clk/meson/axg.c .table = mux_table_clk81, table 1050 drivers/clk/meson/axg.c .table = mux_table_gen_clk, table 67 drivers/clk/meson/clk-dualdiv.c const struct meson_clk_dualdiv_param *table = dualdiv->table; table 71 drivers/clk/meson/clk-dualdiv.c if (!table) table 74 drivers/clk/meson/clk-dualdiv.c for (i = 0; table[i].n1; i++) { table 75 drivers/clk/meson/clk-dualdiv.c now = __dualdiv_param_to_rate(parent_rate, &table[i]); table 79 drivers/clk/meson/clk-dualdiv.c return &table[i]; table 86 drivers/clk/meson/clk-dualdiv.c return (struct meson_clk_dualdiv_param *)&table[best_i]; table 27 drivers/clk/meson/clk-dualdiv.h const struct meson_clk_dualdiv_param *table; table 144 drivers/clk/meson/clk-pll.c if (!pll->table[index].n) table 147 drivers/clk/meson/clk-pll.c *m = pll->table[index].m; table 148 drivers/clk/meson/clk-pll.c *n = pll->table[index].n; table 209 drivers/clk/meson/clk-pll.c else if (pll->table) table 41 drivers/clk/meson/clk-pll.h const struct pll_params_table *table; table 74 drivers/clk/meson/clk-regmap.c return divider_recalc_rate(hw, prate, val, div->table, div->flags, table 96 drivers/clk/meson/clk-regmap.c return divider_ro_round_rate(hw, rate, prate, div->table, table 100 drivers/clk/meson/clk-regmap.c return divider_round_rate(hw, rate, prate, div->table, div->width, table 112 drivers/clk/meson/clk-regmap.c ret = divider_get_val(rate, parent_rate, div->table, div->width, table 150 drivers/clk/meson/clk-regmap.c return clk_mux_val_to_index(hw, mux->table, mux->flags, val); table 157 drivers/clk/meson/clk-regmap.c unsigned int val = clk_mux_index_to_val(mux->table, mux->flags, index); table 72 drivers/clk/meson/clk-regmap.h const struct clk_div_table *table; table 99 drivers/clk/meson/clk-regmap.h u32 *table; table 145 drivers/clk/meson/g12a-aoclk.c .table = g12a_32k_div_table, table 236 drivers/clk/meson/g12a-aoclk.c .table = g12a_32k_div_table, table 1426 drivers/clk/meson/g12a.c .table = mux_table_cpub, table 1469 drivers/clk/meson/g12a.c .table = mux_table_cpub, table 1512 drivers/clk/meson/g12a.c .table = mux_table_cpub, table 1555 drivers/clk/meson/g12a.c .table = mux_table_cpub, table 1874 drivers/clk/meson/g12a.c .table = g12a_pcie_pll_table, table 2419 drivers/clk/meson/g12a.c .table = mux_table_clk81, table 3511 drivers/clk/meson/g12a.c .table = mux_table_cts_sel, table 3527 drivers/clk/meson/g12a.c .table = mux_table_cts_sel, table 3543 drivers/clk/meson/g12a.c .table = mux_table_cts_sel, table 3574 drivers/clk/meson/g12a.c .table = mux_table_hdmi_tx_sel, table 112 drivers/clk/meson/gxbb-aoclk.c .table = gxbb_32k_div_table, table 160 drivers/clk/meson/gxbb-aoclk.c .table = (u32[]){ 1, 2, 3, 4 }, table 461 drivers/clk/meson/gxbb.c .table = gxbb_gp0_pll_params_table, table 515 drivers/clk/meson/gxbb.c .table = gxl_gp0_pll_params_table, table 859 drivers/clk/meson/gxbb.c .table = mux_table_clk81, table 1105 drivers/clk/meson/gxbb.c .table = (u32[]){ 1, 2, 3 }, table 1159 drivers/clk/meson/gxbb.c .table = 
(u32[]){ 1, 2, 3 }, table 2214 drivers/clk/meson/gxbb.c .table = mux_table_cts_sel, table 2230 drivers/clk/meson/gxbb.c .table = mux_table_cts_sel, table 2246 drivers/clk/meson/gxbb.c .table = mux_table_cts_sel, table 2277 drivers/clk/meson/gxbb.c .table = mux_table_hdmi_tx_sel, table 2542 drivers/clk/meson/gxbb.c .table = mux_table_gen_clk, table 235 drivers/clk/meson/meson8b.c .table = sys_pll_params_table, table 574 drivers/clk/meson/meson8b.c .table = mux_table_clk81, table 689 drivers/clk/meson/meson8b.c .table = cpu_scale_table, table 709 drivers/clk/meson/meson8b.c .table = mux_table_cpu_scale_out_sel, table 904 drivers/clk/meson/meson8b.c .table = mux_table_apb, table 984 drivers/clk/meson/meson8b.c .table = mux_table_axi, table 1790 drivers/clk/meson/meson8b.c .table = meson8b_mali_0_1_mux_table, table 1845 drivers/clk/meson/meson8b.c .table = meson8b_mali_0_1_mux_table, table 1945 drivers/clk/meson/meson8b.c .table = meson8m2_gp_pll_params_table, table 2444 drivers/clk/meson/meson8b.c .table = meson8b_cts_amclk_mux_table, table 2503 drivers/clk/meson/meson8b.c .table = meson8b_cts_mclk_i958_mux_table, table 111 drivers/clk/mmp/clk-mix.c struct mmp_clk_mix_clk_table *table, table 122 drivers/clk/mmp/clk-mix.c item = &table[i]; table 222 drivers/clk/mmp/clk-mix.c if (mix->table) { table 224 drivers/clk/mmp/clk-mix.c item = &mix->table[i]; table 355 drivers/clk/mmp/clk-mix.c if (mix->table) { table 357 drivers/clk/mmp/clk-mix.c item = &mix->table[i]; table 388 drivers/clk/mmp/clk-mix.c if (mix->table) { table 390 drivers/clk/mmp/clk-mix.c item = &mix->table[i]; table 426 drivers/clk/mmp/clk-mix.c if (mix->table) table 427 drivers/clk/mmp/clk-mix.c _filter_clk_table(mix, mix->table, mix->table_size); table 464 drivers/clk/mmp/clk-mix.c if (config->table) { table 465 drivers/clk/mmp/clk-mix.c table_bytes = sizeof(*config->table) * config->table_size; table 466 drivers/clk/mmp/clk-mix.c mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL); table 467 drivers/clk/mmp/clk-mix.c if (!mix->table) table 478 drivers/clk/mmp/clk-mix.c kfree(mix->table); table 498 drivers/clk/mmp/clk-mix.c kfree(mix->table); table 76 drivers/clk/mmp/clk.h struct mmp_clk_mix_clk_table *table; table 87 drivers/clk/mmp/clk.h struct mmp_clk_mix_clk_table *table; table 161 drivers/clk/mvebu/armada-37xx-periph.c .table = _table, \ table 651 drivers/clk/mvebu/armada-37xx-periph.c for (clkt = rate->table; clkt->div; clkt++) table 183 drivers/clk/nxp/clk-lpc18xx-cgu.c .table = lpc18xx_cgu_ ##_table, \ table 213 drivers/clk/nxp/clk-lpc18xx-cgu.c .table = lpc18xx_cgu_ ##_table, \ table 280 drivers/clk/nxp/clk-lpc18xx-cgu.c .table = lpc18xx_cgu_ ##_table, \ table 545 drivers/clk/nxp/clk-lpc18xx-cgu.c lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents); table 567 drivers/clk/nxp/clk-lpc18xx-cgu.c lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents); table 592 drivers/clk/nxp/clk-lpc18xx-cgu.c lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents); table 346 drivers/clk/nxp/clk-lpc32xx.c u32 *table; table 355 drivers/clk/nxp/clk-lpc32xx.c const struct clk_div_table *table; table 922 drivers/clk/nxp/clk-lpc32xx.c static unsigned int _get_table_div(const struct clk_div_table *table, table 927 drivers/clk/nxp/clk-lpc32xx.c for (clkt = table; clkt->div; clkt++) table 933 drivers/clk/nxp/clk-lpc32xx.c static unsigned int _get_div(const struct clk_div_table *table, table 938 drivers/clk/nxp/clk-lpc32xx.c if (table) table 939 drivers/clk/nxp/clk-lpc32xx.c return _get_table_div(table, 
val); table 954 drivers/clk/nxp/clk-lpc32xx.c return divider_recalc_rate(hw, parent_rate, val, divider->table, table 969 drivers/clk/nxp/clk-lpc32xx.c bestdiv = _get_div(divider->table, bestdiv, divider->flags, table 974 drivers/clk/nxp/clk-lpc32xx.c return divider_round_rate(hw, rate, prate, divider->table, table 984 drivers/clk/nxp/clk-lpc32xx.c value = divider_get_val(rate, parent_rate, divider->table, table 1008 drivers/clk/nxp/clk-lpc32xx.c if (mux->table) { table 1012 drivers/clk/nxp/clk-lpc32xx.c if (mux->table[i] == val) table 1027 drivers/clk/nxp/clk-lpc32xx.c if (mux->table) table 1028 drivers/clk/nxp/clk-lpc32xx.c index = mux->table[index]; table 1122 drivers/clk/nxp/clk-lpc32xx.c .table = (_table), \ table 1141 drivers/clk/nxp/clk-lpc32xx.c .table = (_table), \ table 955 drivers/clk/qcom/clk-alpha-pll.c const struct clk_div_table *table; table 958 drivers/clk/qcom/clk-alpha-pll.c table = clk_alpha_2bit_div_table; table 960 drivers/clk/qcom/clk-alpha-pll.c table = clk_alpha_div_table; table 962 drivers/clk/qcom/clk-alpha-pll.c return divider_round_rate(hw, rate, prate, table, table 64 drivers/clk/renesas/clk-r8a73a4.c const struct clk_div_table *table = NULL; table 174 drivers/clk/renesas/clk-r8a73a4.c table = div4_div_table; table 179 drivers/clk/renesas/clk-r8a73a4.c if (!table) { table 185 drivers/clk/renesas/clk-r8a73a4.c table, &cpg->lock); table 66 drivers/clk/renesas/clk-r8a7740.c const struct clk_div_table *table = NULL; table 125 drivers/clk/renesas/clk-r8a7740.c table = div4_div_table; table 135 drivers/clk/renesas/clk-r8a7740.c if (!table) { table 141 drivers/clk/renesas/clk-r8a7740.c table, &cpg->lock); table 221 drivers/clk/renesas/clk-rcar-gen2.c div->table = cpg_adsp_div_table; table 312 drivers/clk/renesas/clk-rcar-gen2.c const struct clk_div_table *table = NULL; table 351 drivers/clk/renesas/clk-rcar-gen2.c table = cpg_sdh_div_table; table 355 drivers/clk/renesas/clk-rcar-gen2.c table = cpg_sd01_div_table; table 359 drivers/clk/renesas/clk-rcar-gen2.c table = cpg_sd01_div_table; table 371 drivers/clk/renesas/clk-rcar-gen2.c if (!table) table 377 drivers/clk/renesas/clk-rcar-gen2.c 4, 0, table, &cpg->lock); table 78 drivers/clk/renesas/clk-sh73a0.c const struct clk_div_table *table = NULL; table 131 drivers/clk/renesas/clk-sh73a0.c table = z_div_table; table 141 drivers/clk/renesas/clk-sh73a0.c table = div4_div_table; table 152 drivers/clk/renesas/clk-sh73a0.c if (!table) { table 158 drivers/clk/renesas/clk-sh73a0.c table, &cpg->lock); table 227 drivers/clk/renesas/r8a77970-cpg-mssr.c const struct clk_div_table *table; table 233 drivers/clk/renesas/r8a77970-cpg-mssr.c table = cpg_sd0h_div_table; table 237 drivers/clk/renesas/r8a77970-cpg-mssr.c table = cpg_sd0_div_table; table 252 drivers/clk/renesas/r8a77970-cpg-mssr.c shift, 4, 0, table, &cpg_lock); table 549 drivers/clk/renesas/r9a06g032-clocks.c u16 table[8]; /* we know there are no more than 8 */ table 590 drivers/clk/renesas/r9a06g032-clocks.c if (div >= clk->table[i] && div <= clk->table[i + 1]) { table 592 drivers/clk/renesas/r9a06g032-clocks.c DIV_ROUND_UP(prate, clk->table[i]); table 594 drivers/clk/renesas/r9a06g032-clocks.c DIV_ROUND_UP(prate, clk->table[i + 1]) - table 600 drivers/clk/renesas/r9a06g032-clocks.c div = p >= m ? 
clk->table[i] : clk->table[i + 1]; table 697 drivers/clk/renesas/r9a06g032-clocks.c for (i = 0; i < ARRAY_SIZE(div->table) && table 699 drivers/clk/renesas/r9a06g032-clocks.c div->table[div->table_size++] = desc->div_table[i]; table 220 drivers/clk/renesas/rcar-gen2-cpg.c div->table = cpg_adsp_div_table; table 278 drivers/clk/renesas/rcar-gen2-cpg.c const struct clk_div_table *table = NULL; table 333 drivers/clk/renesas/rcar-gen2-cpg.c table = cpg_sdh_div_table; table 338 drivers/clk/renesas/rcar-gen2-cpg.c table = cpg_sd01_div_table; table 340 drivers/clk/renesas/rcar-gen2-cpg.c table++; table 346 drivers/clk/renesas/rcar-gen2-cpg.c table = cpg_sd01_div_table; table 348 drivers/clk/renesas/rcar-gen2-cpg.c table++; table 365 drivers/clk/renesas/rcar-gen2-cpg.c if (!table) table 372 drivers/clk/renesas/rcar-gen2-cpg.c 0, table, &cpg_lock); table 455 drivers/clk/renesas/rcar-gen3-cpg.c rpc->div.table = cpg_rpc_div_table; table 97 drivers/clk/rockchip/clk.c div->table = div_table; table 208 drivers/clk/samsung/clk.c if (list->table) table 213 drivers/clk/samsung/clk.c list->table, &ctx->lock); table 166 drivers/clk/samsung/clk.h struct clk_div_table *table; table 179 drivers/clk/samsung/clk.h .table = t, \ table 26 drivers/clk/sprd/mux.c if (!mux->table) table 32 drivers/clk/sprd/mux.c if (parent >= mux->table[i] && parent < mux->table[i + 1]) table 52 drivers/clk/sprd/mux.c if (mux->table) table 53 drivers/clk/sprd/mux.c index = mux->table[index]; table 24 drivers/clk/sprd/mux.h const u8 *table; table 36 drivers/clk/sprd/mux.h .table = _table, \ table 86 drivers/clk/sprd/pll.c static u32 pll_get_ibias(u64 rate, const u64 *table) table 88 drivers/clk/sprd/pll.c u32 i, num = table[0]; table 91 drivers/clk/sprd/pll.c if (rate <= table[i]) table 227 drivers/clk/st/clk-flexgen.c fgxbar->mux.table = NULL; table 749 drivers/clk/sunxi-ng/ccu-sun6i-a31.c .table = clk_out_table, table 770 drivers/clk/sunxi-ng/ccu-sun6i-a31.c .table = clk_out_table, table 791 drivers/clk/sunxi-ng/ccu-sun6i-a31.c .table = clk_out_table, table 26 drivers/clk/sunxi-ng/ccu_div.c cd->div.table, cd->div.width, table 70 drivers/clk/sunxi-ng/ccu_div.c val = divider_recalc_rate(hw, parent_rate, val, cd->div.table, table 102 drivers/clk/sunxi-ng/ccu_div.c val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width, table 40 drivers/clk/sunxi-ng/ccu_div.h struct clk_div_table *table; table 48 drivers/clk/sunxi-ng/ccu_div.h .table = _table, \ table 166 drivers/clk/sunxi-ng/ccu_mux.c if (cm->table) { table 171 drivers/clk/sunxi-ng/ccu_mux.c if (cm->table[i] == parent) table 185 drivers/clk/sunxi-ng/ccu_mux.c if (cm->table) table 186 drivers/clk/sunxi-ng/ccu_mux.c index = cm->table[index]; table 23 drivers/clk/sunxi-ng/ccu_mux.h const u8 *table; table 36 drivers/clk/sunxi-ng/ccu_mux.h .table = _table, \ table 37 drivers/clk/sunxi-ng/ccu_sdm.c if (sdm->table[i].rate == rate) table 38 drivers/clk/sunxi-ng/ccu_sdm.c writel(sdm->table[i].pattern, table 100 drivers/clk/sunxi-ng/ccu_sdm.c if (sdm->table[i].rate == rate) table 128 drivers/clk/sunxi-ng/ccu_sdm.c if (sdm->table[i].pattern == reg && table 129 drivers/clk/sunxi-ng/ccu_sdm.c sdm->table[i].m == m && sdm->table[i].n == n) table 130 drivers/clk/sunxi-ng/ccu_sdm.c return sdm->table[i].rate; table 147 drivers/clk/sunxi-ng/ccu_sdm.c if (sdm->table[i].rate == rate) { table 148 drivers/clk/sunxi-ng/ccu_sdm.c *m = sdm->table[i].m; table 149 drivers/clk/sunxi-ng/ccu_sdm.c *n = sdm->table[i].n; table 32 drivers/clk/sunxi-ng/ccu_sdm.h struct ccu_sdm_setting *table; table 44 
drivers/clk/sunxi-ng/ccu_sdm.h .table = _table, \ table 87 drivers/clk/sunxi/clk-a20-gmac.c mux->table = sun7i_a20_gmac_mux_table; table 209 drivers/clk/sunxi/clk-factors.c factors->config = data->table; table 36 drivers/clk/sunxi/clk-factors.h const struct clk_factors_config *table; table 62 drivers/clk/sunxi/clk-mod0.c .table = &sun4i_a10_mod0_config, table 125 drivers/clk/sunxi/clk-mod0.c .table = &sun4i_a10_mod0_config, table 65 drivers/clk/sunxi/clk-sun6i-ar100.c .table = &sun6i_ar100_config, table 70 drivers/clk/sunxi/clk-sun9i-core.c .table = &sun9i_a80_pll4_config, table 124 drivers/clk/sunxi/clk-sun9i-core.c .table = &sun9i_a80_gt_config, table 179 drivers/clk/sunxi/clk-sun9i-core.c .table = &sun9i_a80_ahb_config, table 205 drivers/clk/sunxi/clk-sun9i-core.c .table = &sun9i_a80_ahb_config, table 262 drivers/clk/sunxi/clk-sun9i-core.c .table = &sun9i_a80_apb1_config, table 492 drivers/clk/sunxi/clk-sunxi.c .table = &sun4i_pll1_config, table 498 drivers/clk/sunxi/clk-sunxi.c .table = &sun6i_a31_pll1_config, table 504 drivers/clk/sunxi/clk-sunxi.c .table = &sun8i_a23_pll1_config, table 510 drivers/clk/sunxi/clk-sunxi.c .table = &sun4i_pll5_config, table 516 drivers/clk/sunxi/clk-sunxi.c .table = &sun4i_pll5_config, table 522 drivers/clk/sunxi/clk-sunxi.c .table = &sun6i_a31_pll6_config, table 529 drivers/clk/sunxi/clk-sunxi.c .table = &sun5i_a13_ahb_config, table 536 drivers/clk/sunxi/clk-sunxi.c .table = &sun6i_ahb1_config, table 544 drivers/clk/sunxi/clk-sunxi.c .table = &sun4i_apb1_config, table 552 drivers/clk/sunxi/clk-sunxi.c .table = &sun7i_a20_out_config, table 728 drivers/clk/sunxi/clk-sunxi.c const struct clk_div_table *table; table 751 drivers/clk/sunxi/clk-sunxi.c .table = sun8i_a23_axi_table, table 772 drivers/clk/sunxi/clk-sunxi.c .table = sun4i_apb0_table, table 800 drivers/clk/sunxi/clk-sunxi.c data->table, &clk_lock); table 885 drivers/clk/sunxi/clk-sunxi.c struct clk_div_table *table; /* is it a table based divisor? */ table 916 drivers/clk/sunxi/clk-sunxi.c { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */ table 1070 drivers/clk/sunxi/clk-sunxi.c divider->table = data->div[i].table; table 1158 drivers/clk/sunxi/clk-sunxi.c .table = &sun6i_display_config, table 590 drivers/clk/tegra/clk.h .table = _table, \ table 55 drivers/clk/tegra/cvb.c static int build_opp_table(struct device *dev, const struct cvb_table *table, table 61 drivers/clk/tegra/cvb.c min_mv = round_voltage(table->min_millivolts, align, UP); table 62 drivers/clk/tegra/cvb.c max_mv = round_voltage(table->max_millivolts, align, DOWN); table 65 drivers/clk/tegra/cvb.c const struct cvb_table_freq_entry *entry = &table->entries[i]; table 70 drivers/clk/tegra/cvb.c dfll_mv = get_cvb_voltage(speedo_value, table->speedo_scale, table 72 drivers/clk/tegra/cvb.c dfll_mv = round_cvb_voltage(dfll_mv, table->voltage_scale, table 111 drivers/clk/tegra/cvb.c const struct cvb_table *table = &tables[i]; table 113 drivers/clk/tegra/cvb.c if (table->speedo_id != -1 && table->speedo_id != speedo_id) table 116 drivers/clk/tegra/cvb.c if (table->process_id != -1 && table->process_id != process_id) table 119 drivers/clk/tegra/cvb.c ret = build_opp_table(dev, table, align, speedo_value, table 121 drivers/clk/tegra/cvb.c return ret ? 
ERR_PTR(ret) : table; table 128 drivers/clk/tegra/cvb.c const struct cvb_table *table, table 134 drivers/clk/tegra/cvb.c const struct cvb_table_freq_entry *entry = &table->entries[i]; table 57 drivers/clk/tegra/cvb.h const struct cvb_table *table, table 384 drivers/clk/ti/clkctrl.c &div->width, &div->table)) { table 26 drivers/clk/ti/clock.h const struct clk_div_table *table; table 35 drivers/clk/ti/clock.h u32 *table; table 224 drivers/clk/ti/clock.h const struct clk_div_table **table); table 31 drivers/clk/ti/divider.c static unsigned int _get_table_maxdiv(const struct clk_div_table *table) table 36 drivers/clk/ti/divider.c for (clkt = table; clkt->div; clkt++) table 48 drivers/clk/ti/divider.c if (divider->table) table 49 drivers/clk/ti/divider.c return _get_table_maxdiv(divider->table); table 53 drivers/clk/ti/divider.c static unsigned int _get_table_div(const struct clk_div_table *table, table 58 drivers/clk/ti/divider.c for (clkt = table; clkt->div; clkt++) table 70 drivers/clk/ti/divider.c if (divider->table) table 71 drivers/clk/ti/divider.c return _get_table_div(divider->table, val); table 75 drivers/clk/ti/divider.c static unsigned int _get_table_val(const struct clk_div_table *table, table 80 drivers/clk/ti/divider.c for (clkt = table; clkt->div; clkt++) table 92 drivers/clk/ti/divider.c if (divider->table) table 93 drivers/clk/ti/divider.c return _get_table_val(divider->table, div); table 123 drivers/clk/ti/divider.c static bool _is_valid_table_div(const struct clk_div_table *table, table 128 drivers/clk/ti/divider.c for (clkt = table; clkt->div; clkt++) table 138 drivers/clk/ti/divider.c if (divider->table) table 139 drivers/clk/ti/divider.c return _is_valid_table_div(divider->table, div); table 143 drivers/clk/ti/divider.c static int _div_round_up(const struct clk_div_table *table, table 150 drivers/clk/ti/divider.c for (clkt = table; clkt->div; clkt++) { table 163 drivers/clk/ti/divider.c static int _div_round(const struct clk_div_table *table, table 166 drivers/clk/ti/divider.c if (!table) table 169 drivers/clk/ti/divider.c return _div_round_up(table, parent_rate, rate); table 187 drivers/clk/ti/divider.c bestdiv = _div_round(divider->table, parent_rate, rate); table 319 drivers/clk/ti/divider.c const struct clk_div_table *table) table 350 drivers/clk/ti/divider.c div->table = table; table 363 drivers/clk/ti/divider.c const struct clk_div_table **table) table 388 drivers/clk/ti/divider.c *table = NULL; table 407 drivers/clk/ti/divider.c *table = ERR_PTR(-ENOMEM); table 423 drivers/clk/ti/divider.c *table = tmp; table 431 drivers/clk/ti/divider.c struct clk_div_table *table; table 459 drivers/clk/ti/divider.c table = kcalloc(valid_div + 1, sizeof(*table), GFP_KERNEL); table 461 drivers/clk/ti/divider.c if (!table) table 469 drivers/clk/ti/divider.c table[valid_div].div = val; table 470 drivers/clk/ti/divider.c table[valid_div].val = i; table 475 drivers/clk/ti/divider.c return table; table 479 drivers/clk/ti/divider.c const struct clk_div_table *table, table 487 drivers/clk/ti/divider.c if (!table) { table 513 drivers/clk/ti/divider.c while (table[div].div) { table 514 drivers/clk/ti/divider.c val = table[div].val; table 523 drivers/clk/ti/divider.c struct clk_omap_reg *reg, const struct clk_div_table **table, table 557 drivers/clk/ti/divider.c *table = ti_clk_get_div_table(node); table 559 drivers/clk/ti/divider.c if (IS_ERR(*table)) table 560 drivers/clk/ti/divider.c return PTR_ERR(*table); table 562 drivers/clk/ti/divider.c *width = _get_divider_width(node, *table, 
*div_flags); table 582 drivers/clk/ti/divider.c const struct clk_div_table *table = NULL; table 587 drivers/clk/ti/divider.c if (ti_clk_divider_populate(node, &reg, &table, &flags, table 592 drivers/clk/ti/divider.c shift, width, latch, clk_divider_flags, table); table 601 drivers/clk/ti/divider.c kfree(table); table 614 drivers/clk/ti/divider.c if (ti_clk_divider_populate(node, &div->reg, &div->table, &val, table 623 drivers/clk/ti/divider.c kfree(div->table); table 45 drivers/clk/ti/mux.c if (mux->table) { table 49 drivers/clk/ti/mux.c if (mux->table[i] == val) table 71 drivers/clk/ti/mux.c if (mux->table) { table 72 drivers/clk/ti/mux.c index = mux->table[index]; table 133 drivers/clk/ti/mux.c s8 latch, u8 clk_mux_flags, u32 *table) table 156 drivers/clk/ti/mux.c mux->table = table; table 198 drivers/clk/zte/clk-zx296702.c const struct clk_div_table *table) table 201 drivers/clk/zte/clk-zx296702.c width, 0, table, &reg_lock); table 129 drivers/clk/zte/clk.h .table = _table, \ table 507 drivers/clocksource/arm_arch_timer.c const struct acpi_table_header *table = arg; table 511 drivers/clocksource/arm_arch_timer.c if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) && table 512 drivers/clocksource/arm_arch_timer.c !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && table 513 drivers/clocksource/arm_arch_timer.c info->oem_revision == table->oem_revision) table 1564 drivers/clocksource/arm_arch_timer.c static int __init arch_timer_acpi_init(struct acpi_table_header *table) table 1575 drivers/clocksource/arm_arch_timer.c ret = acpi_gtdt_init(table, &platform_timer_count); table 1612 drivers/clocksource/arm_arch_timer.c arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table); table 243 drivers/cpufreq/arm_big_little.c static inline u32 get_table_count(struct cpufreq_frequency_table *table) table 247 drivers/cpufreq/arm_big_little.c for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++) table 254 drivers/cpufreq/arm_big_little.c static inline u32 get_table_min(struct cpufreq_frequency_table *table) table 258 drivers/cpufreq/arm_big_little.c cpufreq_for_each_entry(pos, table) table 265 drivers/cpufreq/arm_big_little.c static inline u32 get_table_max(struct cpufreq_frequency_table *table) table 269 drivers/cpufreq/arm_big_little.c cpufreq_for_each_entry(pos, table) table 278 drivers/cpufreq/arm_big_little.c struct cpufreq_frequency_table *table; table 283 drivers/cpufreq/arm_big_little.c table = kcalloc(count, sizeof(*table), GFP_KERNEL); table 284 drivers/cpufreq/arm_big_little.c if (!table) table 287 drivers/cpufreq/arm_big_little.c freq_table[MAX_CLUSTERS] = table; table 293 drivers/cpufreq/arm_big_little.c table[k].frequency = VIRT_FREQ(i, table 296 drivers/cpufreq/arm_big_little.c table[k].frequency); table 301 drivers/cpufreq/arm_big_little.c table[k].driver_data = k; table 302 drivers/cpufreq/arm_big_little.c table[k].frequency = CPUFREQ_TABLE_END; table 304 drivers/cpufreq/arm_big_little.c pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k); table 68 drivers/cpufreq/bmips-cpufreq.c struct cpufreq_frequency_table *table; table 74 drivers/cpufreq/bmips-cpufreq.c table = kmalloc_array(priv->max_freqs + 1, sizeof(*table), GFP_KERNEL); table 75 drivers/cpufreq/bmips-cpufreq.c if (!table) table 79 drivers/cpufreq/bmips-cpufreq.c table[i].frequency = cpu_freq / (1 << i); table 80 drivers/cpufreq/bmips-cpufreq.c table[i].driver_data = i; table 82 drivers/cpufreq/bmips-cpufreq.c table[i].frequency = CPUFREQ_TABLE_END; table 84 
drivers/cpufreq/bmips-cpufreq.c return table; table 404 drivers/cpufreq/brcmstb-avs-cpufreq.c struct cpufreq_frequency_table *table; table 413 drivers/cpufreq/brcmstb-avs-cpufreq.c table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table), table 415 drivers/cpufreq/brcmstb-avs-cpufreq.c if (!table) table 422 drivers/cpufreq/brcmstb-avs-cpufreq.c table[i].frequency = brcm_avs_get_frequency(priv->base); table 423 drivers/cpufreq/brcmstb-avs-cpufreq.c table[i].driver_data = i; table 425 drivers/cpufreq/brcmstb-avs-cpufreq.c table[i].frequency = CPUFREQ_TABLE_END; table 432 drivers/cpufreq/brcmstb-avs-cpufreq.c return table; table 166 drivers/cpufreq/cpufreq.c struct cpufreq_frequency_table *table, table 169 drivers/cpufreq/cpufreq.c policy->freq_table = table; table 19 drivers/cpufreq/freq_table.c struct cpufreq_frequency_table *pos, *table = policy->freq_table; table 21 drivers/cpufreq/freq_table.c if (!table) table 24 drivers/cpufreq/freq_table.c cpufreq_for_each_valid_entry(pos, table) table 33 drivers/cpufreq/freq_table.c struct cpufreq_frequency_table *table) table 40 drivers/cpufreq/freq_table.c cpufreq_for_each_valid_entry(pos, table) { table 47 drivers/cpufreq/freq_table.c pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq); table 64 drivers/cpufreq/freq_table.c struct cpufreq_frequency_table *table) table 75 drivers/cpufreq/freq_table.c cpufreq_for_each_valid_entry(pos, table) { table 125 drivers/cpufreq/freq_table.c struct cpufreq_frequency_table *table = policy->freq_table; table 142 drivers/cpufreq/freq_table.c cpufreq_for_each_valid_entry_idx(pos, table, i) { table 182 drivers/cpufreq/freq_table.c freq > table[optimal.driver_data].frequency)) { table 200 drivers/cpufreq/freq_table.c table[index].frequency); table 208 drivers/cpufreq/freq_table.c struct cpufreq_frequency_table *pos, *table = policy->freq_table; table 211 drivers/cpufreq/freq_table.c if (unlikely(!table)) { table 216 drivers/cpufreq/freq_table.c cpufreq_for_each_valid_entry_idx(pos, table, idx) table 231 drivers/cpufreq/freq_table.c struct cpufreq_frequency_table *pos, *table = policy->freq_table; table 233 drivers/cpufreq/freq_table.c if (!table) table 236 drivers/cpufreq/freq_table.c cpufreq_for_each_valid_entry(pos, table) { table 295 drivers/cpufreq/freq_table.c struct cpufreq_frequency_table *pos, *table = policy->freq_table; table 301 drivers/cpufreq/freq_table.c cpufreq_for_each_valid_entry(pos, table) { table 89 drivers/cpufreq/pxa3xx-cpufreq.c struct cpufreq_frequency_table *table; table 92 drivers/cpufreq/pxa3xx-cpufreq.c table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL); table 93 drivers/cpufreq/pxa3xx-cpufreq.c if (table == NULL) table 97 drivers/cpufreq/pxa3xx-cpufreq.c table[i].driver_data = i; table 98 drivers/cpufreq/pxa3xx-cpufreq.c table[i].frequency = freqs[i].cpufreq_mhz * 1000; table 100 drivers/cpufreq/pxa3xx-cpufreq.c table[num].driver_data = i; table 101 drivers/cpufreq/pxa3xx-cpufreq.c table[num].frequency = CPUFREQ_TABLE_END; table 105 drivers/cpufreq/pxa3xx-cpufreq.c pxa3xx_freqs_table = table; table 107 drivers/cpufreq/pxa3xx-cpufreq.c policy->freq_table = table; table 91 drivers/cpufreq/qcom-cpufreq-hw.c struct cpufreq_frequency_table *table; table 93 drivers/cpufreq/qcom-cpufreq-hw.c table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL); table 94 drivers/cpufreq/qcom-cpufreq-hw.c if (!table) table 114 drivers/cpufreq/qcom-cpufreq-hw.c table[i].frequency = freq; table 119 drivers/cpufreq/qcom-cpufreq-hw.c table[i].frequency = CPUFREQ_ENTRY_INVALID; table 127 
drivers/cpufreq/qcom-cpufreq-hw.c struct cpufreq_frequency_table *prev = &table[i - 1]; table 145 drivers/cpufreq/qcom-cpufreq-hw.c table[i].frequency = CPUFREQ_TABLE_END; table 146 drivers/cpufreq/qcom-cpufreq-hw.c policy->freq_table = table; table 29 drivers/cpufreq/qoriq-cpufreq.c struct cpufreq_frequency_table *table; table 134 drivers/cpufreq/qoriq-cpufreq.c struct cpufreq_frequency_table table; table 150 drivers/cpufreq/qoriq-cpufreq.c table.driver_data = freq_table[i].driver_data; table 151 drivers/cpufreq/qoriq-cpufreq.c table.frequency = freq_table[i].frequency; table 154 drivers/cpufreq/qoriq-cpufreq.c freq_table[ind].driver_data = table.driver_data; table 155 drivers/cpufreq/qoriq-cpufreq.c freq_table[ind].frequency = table.frequency; table 167 drivers/cpufreq/qoriq-cpufreq.c struct cpufreq_frequency_table *table; table 193 drivers/cpufreq/qoriq-cpufreq.c table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL); table 194 drivers/cpufreq/qoriq-cpufreq.c if (!table) table 201 drivers/cpufreq/qoriq-cpufreq.c table[i].frequency = freq / 1000; table 202 drivers/cpufreq/qoriq-cpufreq.c table[i].driver_data = i; table 204 drivers/cpufreq/qoriq-cpufreq.c freq_table_redup(table, count); table 205 drivers/cpufreq/qoriq-cpufreq.c freq_table_sort(table, count); table 206 drivers/cpufreq/qoriq-cpufreq.c table[i].frequency = CPUFREQ_TABLE_END; table 207 drivers/cpufreq/qoriq-cpufreq.c policy->freq_table = table; table 208 drivers/cpufreq/qoriq-cpufreq.c data->table = table; table 238 drivers/cpufreq/qoriq-cpufreq.c kfree(data->table); table 251 drivers/cpufreq/qoriq-cpufreq.c parent = data->pclk[data->table[index].driver_data]; table 198 drivers/cpufreq/s3c2440-cpufreq.c struct cpufreq_frequency_table *table, table 212 drivers/cpufreq/s3c2440-cpufreq.c index = s3c_cpufreq_addfreq(table, index, table_size, freq); table 223 drivers/cpufreq/s3c2440-cpufreq.c struct cpufreq_frequency_table *table, table 234 drivers/cpufreq/s3c2440-cpufreq.c table, table_size); table 23 drivers/cpufreq/sfi-cpufreq.c static int sfi_parse_freq(struct sfi_table_header *table) table 29 drivers/cpufreq/sfi-cpufreq.c sb = (struct sfi_table_simple *)table; table 26 drivers/cpufreq/sparc-us2e-cpufreq.c struct cpufreq_frequency_table table[6]; table 278 drivers/cpufreq/sparc-us2e-cpufreq.c struct cpufreq_frequency_table *table = table 279 drivers/cpufreq/sparc-us2e-cpufreq.c &us2e_freq_table[cpu].table[0]; table 281 drivers/cpufreq/sparc-us2e-cpufreq.c table[0].driver_data = 0; table 282 drivers/cpufreq/sparc-us2e-cpufreq.c table[0].frequency = clock_tick / 1; table 283 drivers/cpufreq/sparc-us2e-cpufreq.c table[1].driver_data = 1; table 284 drivers/cpufreq/sparc-us2e-cpufreq.c table[1].frequency = clock_tick / 2; table 285 drivers/cpufreq/sparc-us2e-cpufreq.c table[2].driver_data = 2; table 286 drivers/cpufreq/sparc-us2e-cpufreq.c table[2].frequency = clock_tick / 4; table 287 drivers/cpufreq/sparc-us2e-cpufreq.c table[2].driver_data = 3; table 288 drivers/cpufreq/sparc-us2e-cpufreq.c table[2].frequency = clock_tick / 6; table 289 drivers/cpufreq/sparc-us2e-cpufreq.c table[2].driver_data = 4; table 290 drivers/cpufreq/sparc-us2e-cpufreq.c table[2].frequency = clock_tick / 8; table 291 drivers/cpufreq/sparc-us2e-cpufreq.c table[2].driver_data = 5; table 292 drivers/cpufreq/sparc-us2e-cpufreq.c table[3].frequency = CPUFREQ_TABLE_END; table 296 drivers/cpufreq/sparc-us2e-cpufreq.c policy->freq_table = table; table 25 drivers/cpufreq/sparc-us3-cpufreq.c struct cpufreq_frequency_table table[4]; table 126 
drivers/cpufreq/sparc-us3-cpufreq.c struct cpufreq_frequency_table *table = table 127 drivers/cpufreq/sparc-us3-cpufreq.c &us3_freq_table[cpu].table[0]; table 129 drivers/cpufreq/sparc-us3-cpufreq.c table[0].driver_data = 0; table 130 drivers/cpufreq/sparc-us3-cpufreq.c table[0].frequency = clock_tick / 1; table 131 drivers/cpufreq/sparc-us3-cpufreq.c table[1].driver_data = 1; table 132 drivers/cpufreq/sparc-us3-cpufreq.c table[1].frequency = clock_tick / 2; table 133 drivers/cpufreq/sparc-us3-cpufreq.c table[2].driver_data = 2; table 134 drivers/cpufreq/sparc-us3-cpufreq.c table[2].frequency = clock_tick / 32; table 135 drivers/cpufreq/sparc-us3-cpufreq.c table[3].driver_data = 0; table 136 drivers/cpufreq/sparc-us3-cpufreq.c table[3].frequency = CPUFREQ_TABLE_END; table 140 drivers/cpufreq/sparc-us3-cpufreq.c policy->freq_table = table; table 43 drivers/cpufreq/tegra186-cpufreq.c struct cpufreq_frequency_table *table; table 73 drivers/cpufreq/tegra186-cpufreq.c policy->freq_table = cluster->table; table 107 drivers/cpufreq/tegra186-cpufreq.c struct cpufreq_frequency_table *table; table 133 drivers/cpufreq/tegra186-cpufreq.c table = ERR_PTR(err); table 150 drivers/cpufreq/tegra186-cpufreq.c table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table), table 152 drivers/cpufreq/tegra186-cpufreq.c if (!table) { table 153 drivers/cpufreq/tegra186-cpufreq.c table = ERR_PTR(-ENOMEM); table 172 drivers/cpufreq/tegra186-cpufreq.c point = &table[j++]; table 178 drivers/cpufreq/tegra186-cpufreq.c table[j].frequency = CPUFREQ_TABLE_END; table 183 drivers/cpufreq/tegra186-cpufreq.c return table; table 219 drivers/cpufreq/tegra186-cpufreq.c cluster->table = init_vhint_table( table 221 drivers/cpufreq/tegra186-cpufreq.c if (IS_ERR(cluster->table)) { table 222 drivers/cpufreq/tegra186-cpufreq.c err = PTR_ERR(cluster->table); table 306 drivers/crypto/ccp/ccp-crypto-main.c struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table, table 311 drivers/crypto/ccp/ccp-crypto-main.c for (sg = table->sgl; sg; sg = sg_next(sg)) table 271 drivers/crypto/ccp/ccp-crypto.h struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table, table 1726 drivers/crypto/n2_core.c struct spu_queue **table) table 1738 drivers/crypto/n2_core.c if (table[*id] != NULL) { table 1744 drivers/crypto/n2_core.c table[*id] = p; table 1753 drivers/crypto/n2_core.c irq_handler_t handler, struct spu_queue **table) table 1771 drivers/crypto/n2_core.c err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); table 1785 drivers/crypto/n2_core.c irq_handler_t handler, struct spu_queue **table) table 1798 drivers/crypto/n2_core.c exec_name, q_type, handler, table); table 331 drivers/edac/i7300_edac.c static const char *get_err_from_table(const char *table[], int size, int pos) table 336 drivers/edac/i7300_edac.c if (unlikely(!table[pos])) table 339 drivers/edac/i7300_edac.c return table[pos]; table 342 drivers/edac/i7300_edac.c #define GET_ERR_FROM_TABLE(table, pos) \ table 343 drivers/edac/i7300_edac.c get_err_from_table(table, ARRAY_SIZE(table), pos) table 454 drivers/edac/i7core_edac.c const struct pci_id_table *table) table 462 drivers/edac/i7core_edac.c i7core_dev->pdev = kcalloc(table->n_devs, sizeof(*i7core_dev->pdev), table 470 drivers/edac/i7core_edac.c i7core_dev->n_devs = table->n_devs; table 1261 drivers/edac/i7core_edac.c static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table) table 1271 drivers/edac/i7core_edac.c while (table && table->descr) { table 1272 drivers/edac/i7core_edac.c pdev = 
pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL); table 1278 drivers/edac/i7core_edac.c table++; table 1306 drivers/edac/i7core_edac.c const struct pci_id_table *table, table 1311 drivers/edac/i7core_edac.c const struct pci_id_descr *dev_descr = &table->descr[devno]; table 1365 drivers/edac/i7core_edac.c i7core_dev = alloc_i7core_dev(socket, table); table 1427 drivers/edac/i7core_edac.c const struct pci_id_table *table = pci_dev_table; table 1431 drivers/edac/i7core_edac.c while (table && table->descr) { table 1432 drivers/edac/i7core_edac.c for (i = 0; i < table->n_devs; i++) { table 1435 drivers/edac/i7core_edac.c rc = i7core_get_onedevice(&pdev, table, i, table 1439 drivers/edac/i7core_edac.c i = table->n_devs; table 1447 drivers/edac/i7core_edac.c table++; table 143 drivers/edac/sb_edac.c static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, table 146 drivers/edac/sb_edac.c return GET_BITFIELD(reg, table[interleave].start, table 147 drivers/edac/sb_edac.c table[interleave].end); table 760 drivers/edac/sb_edac.c const struct pci_id_table *table) table 768 drivers/edac/sb_edac.c sbridge_dev->pdev = kcalloc(table->n_devs_per_imc, table 779 drivers/edac/sb_edac.c sbridge_dev->n_devs = table->n_devs_per_imc; table 2331 drivers/edac/sb_edac.c const struct pci_id_table *table, table 2336 drivers/edac/sb_edac.c const struct pci_id_descr *dev_descr = &table->descr[devno]; table 2387 drivers/edac/sb_edac.c sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table); table 2409 drivers/edac/sb_edac.c if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock) table 2446 drivers/edac/sb_edac.c const struct pci_id_table *table) table 2453 drivers/edac/sb_edac.c if (table->type == KNIGHTS_LANDING) table 2455 drivers/edac/sb_edac.c while (table && table->descr) { table 2456 drivers/edac/sb_edac.c for (i = 0; i < table->n_devs_per_sock; i++) { table 2458 drivers/edac/sb_edac.c table->descr[i].dev_id != table 2459 drivers/edac/sb_edac.c table->descr[i-1].dev_id) { table 2464 drivers/edac/sb_edac.c table, i, multi_bus); table 2467 drivers/edac/sb_edac.c i = table->n_devs_per_sock; table 2475 drivers/edac/sb_edac.c table++; table 27 drivers/firmware/efi/efi-bgrt.c void __init efi_bgrt_init(struct acpi_table_header *table) table 39 drivers/firmware/efi/efi-bgrt.c if (table->length < sizeof(bgrt_tab)) { table 41 drivers/firmware/efi/efi-bgrt.c table->length, sizeof(bgrt_tab)); table 44 drivers/firmware/efi/efi-bgrt.c *bgrt = *(struct acpi_table_bgrt *)table; table 484 drivers/firmware/efi/efi.c unsigned long table, table 492 drivers/firmware/efi/efi.c *(table_types[i].ptr) = table; table 495 drivers/firmware/efi/efi.c table_types[i].name, table); table 514 drivers/firmware/efi/efi.c unsigned long table; table 519 drivers/firmware/efi/efi.c table64 = ((efi_config_table_64_t *)tablep)->table; table 520 drivers/firmware/efi/efi.c table = table64; table 530 drivers/firmware/efi/efi.c table = ((efi_config_table_32_t *)tablep)->table; table 533 drivers/firmware/efi/efi.c if (!match_config_table(&guid, table, common_tables)) table 534 drivers/firmware/efi/efi.c match_config_table(&guid, table, arch_tables); table 941 drivers/firmware/efi/libstub/efi-stub-helper.c return (void *)(unsigned long)tables[i].table; \ table 836 drivers/firmware/iscsi_ibft.c struct acpi_table_header *table = NULL; table 842 drivers/firmware/iscsi_ibft.c acpi_get_table(ibft_signs[i].sign, 0, &table); table 843 drivers/firmware/iscsi_ibft.c ibft_addr = (struct acpi_table_ibft *)table; table 4237 
drivers/gpio/gpiolib.c void gpiod_add_lookup_table(struct gpiod_lookup_table *table) table 4241 drivers/gpio/gpiolib.c list_add_tail(&table->list, &gpio_lookup_list); table 4251 drivers/gpio/gpiolib.c void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) table 4255 drivers/gpio/gpiolib.c list_del(&table->list); table 4291 drivers/gpio/gpiolib.c struct gpiod_lookup_table *table; table 4295 drivers/gpio/gpiolib.c list_for_each_entry(table, &gpio_lookup_list, list) { table 4296 drivers/gpio/gpiolib.c if (table->dev_id && dev_id) { table 4301 drivers/gpio/gpiolib.c if (!strcmp(table->dev_id, dev_id)) table 4308 drivers/gpio/gpiolib.c if (dev_id == table->dev_id) table 4312 drivers/gpio/gpiolib.c table = NULL; table 4316 drivers/gpio/gpiolib.c return table; table 4323 drivers/gpio/gpiolib.c struct gpiod_lookup_table *table; table 4326 drivers/gpio/gpiolib.c table = gpiod_find_lookup_table(dev); table 4327 drivers/gpio/gpiolib.c if (!table) table 4330 drivers/gpio/gpiolib.c for (p = &table->table[0]; p->chip_label; p++) { table 4375 drivers/gpio/gpiolib.c struct gpiod_lookup_table *table; table 4379 drivers/gpio/gpiolib.c table = gpiod_find_lookup_table(dev); table 4380 drivers/gpio/gpiolib.c if (!table) table 4383 drivers/gpio/gpiolib.c for (p = &table->table[0]; p->chip_label; p++) { table 310 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h #define amdgpu_dpm_get_pp_table(adev, table) \ table 311 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)) table 536 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c char *table = NULL; table 540 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c size = smu_sys_get_pp_table(&adev->smu, (void **)&table); table 545 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c size = amdgpu_dpm_get_pp_table(adev, &table); table 552 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c memcpy(buf, table, size); table 32 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table) table 34 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c return amdgpu_bo_kmap(table, NULL); table 36 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table) table 40 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c r = amdgpu_ttm_alloc_gart(&table->tbo); table 44 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c if (table->shadow) table 45 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c r = amdgpu_ttm_alloc_gart(&table->shadow->tbo); table 138 drivers/gpu/drm/amd/amdgpu/kv_dpm.c ATOM_AVAILABLE_SCLK_LIST *table) table 145 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table[i].ulSupportedSCLK > prev_sclk) { table 147 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table[i].ulSupportedSCLK; table 149 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table[i].usVoltageIndex; table 150 drivers/gpu/drm/amd/amdgpu/kv_dpm.c prev_sclk = table[i].ulSupportedSCLK; table 160 drivers/gpu/drm/amd/amdgpu/kv_dpm.c ATOM_AVAILABLE_SCLK_LIST *table) table 165 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table[i].ulSupportedSCLK != 0) { table 166 drivers/gpu/drm/amd/amdgpu/kv_dpm.c vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = table 167 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table[i].usVoltageID; table 168 drivers/gpu/drm/amd/amdgpu/kv_dpm.c vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = table 169 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table[i].usVoltageIndex; table 802 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 805 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table && table->count) { 
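
The drivers/gpio/gpiolib.c references above (gpiod_add_lookup_table, gpiod_remove_lookup_table, gpiod_find_lookup_table) are the machine-level GPIO lookup-table API. A minimal sketch of how board code typically feeds that API, assuming an invented consumer name "foo-device" and chip label "gpiochip0"; the sentinel empty entry is what the gpiod_find() loop above stops on:

	#include <linux/gpio/machine.h>

	/* Illustrative only: device name, chip label and offset are made up. */
	static struct gpiod_lookup_table foo_gpiod_table = {
		.dev_id = "foo-device",
		.table = {
			/* chip label, hw offset, con_id, flags */
			GPIO_LOOKUP("gpiochip0", 17, "reset", GPIO_ACTIVE_LOW),
			{ },	/* sentinel: empty chip label ends the walk */
		},
	};

	/* ...from board init code... */
	gpiod_add_lookup_table(&foo_gpiod_table);
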
table 807 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].clk == pi->boot_pl.sclk) table 814 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 817 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->num_max_dpm_entries == 0) table 821 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) table 904 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_uvd_clock_voltage_dependency_table *table = table 910 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table == NULL || table->count == 0) table 914 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = 0; i < table->count; i++) { table 916 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (pi->high_voltage_t < table->entries[i].v)) table 919 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); table 920 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); table 921 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); table 924 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); table 926 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); table 929 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].vclk, false, &dividers); table 935 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].dclk, false, &dividers); table 977 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_vce_clock_voltage_dependency_table *table = table 981 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table == NULL || table->count == 0) table 985 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = 0; i < table->count; i++) { table 987 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->high_voltage_t < table->entries[i].v) table 990 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); table 991 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); table 994 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); table 997 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].evclk, false, &dividers); table 1038 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 1044 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table == NULL || table->count == 0) table 1048 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = 0; i < table->count; i++) { table 1050 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->high_voltage_t < table->entries[i].v) table 1053 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); table 1054 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); table 1057 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (u8)kv_get_clk_bypass(adev, table->entries[i].clk); table 1060 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].clk, false, &dividers); table 1104 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 1110 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table == NULL || table->count == 0) table 1114 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = 0; i < table->count; i++) { table 1115 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); table 1116 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); table 1119 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].clk, false, &dividers); 
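
The drivers/gpu/drm/amd/amdgpu/kv_dpm.c references in this stretch all walk the same shape of table: an amdgpu_clock_voltage_dependency_table with a count and entries[] of clock/voltage pairs. A hedged sketch of that lookup pattern is below; the helper name is invented for illustration, the struct fields follow the usage visible in the listing, and the fallback to the last entry mirrors the si_dpm.c walkers later in this listing:

	/* Illustrative helper, not from the kernel tree: return the voltage of
	 * the lowest table entry whose clock covers the request, falling back
	 * to the last (highest) entry when the request is above the table. */
	static u16 example_voltage_for_clock(
			const struct amdgpu_clock_voltage_dependency_table *table,
			u32 clk)
	{
		u32 i;

		if (!table || table->count == 0)
			return 0;

		for (i = 0; i < table->count; i++)
			if (clk <= table->entries[i].clk)
				return table->entries[i].v;

		return table->entries[table->count - 1].v;
	}
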
table 1163 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 1166 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table && table->count) { table 1169 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) table 1171 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) table 1173 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) table 1175 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) table 1177 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) table 1186 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 1190 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) table 1192 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) table 1194 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) table 1196 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) table 1198 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) table 1496 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_uvd_clock_voltage_dependency_table *table = table 1502 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->count) table 1503 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->uvd_boot_level = table->count - 1; table 1532 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_vce_clock_voltage_dependency_table *table = table 1535 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = 0; i < table->count; i++) { table 1536 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].evclk >= evclk) table 1548 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_vce_clock_voltage_dependency_table *table = table 1554 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->vce_boot_level = table->count - 1; table 1582 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 1588 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->samu_boot_level = table->count - 1; table 1613 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 1616 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = 0; i < table->count; i++) { table 1617 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].clk >= 0) /* XXX */ table 1621 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (i >= table->count) table 1622 drivers/gpu/drm/amd/amdgpu/kv_dpm.c i = table->count - 1; table 1646 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 1652 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->acp_boot_level = table->count - 1; table 1777 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 1780 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table && table->count) { table 1782 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if ((table->entries[i].clk >= new_ps->levels[0].sclk) || table 1790 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) table 1796 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > table 1797 
drivers/gpu/drm/amd/amdgpu/kv_dpm.c (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) table 1803 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 1807 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || table 1815 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].sclk_frequency <= table 1823 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[pi->highest_valid].sclk_frequency) > table 1824 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (table->entries[pi->lowest_valid].sclk_frequency - table 2035 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_and_voltage_limits *table) table 2041 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->sclk = table 2043 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->vddc = table 2048 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->mclk = pi->sys_info.nbp_memory_clock[0]; table 2173 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 2177 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table && table->count) { table 2178 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = table->count - 1; i >= 0; i--) { table 2180 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= table 2187 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 2190 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { table 2192 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= table 2214 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 2234 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = table->count - 1; i >= 0; i--) { table 2235 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (stable_p_state_sclk >= table->entries[i].clk) { table 2236 drivers/gpu/drm/amd/amdgpu/kv_dpm.c stable_p_state_sclk = table->entries[i].clk; table 2242 drivers/gpu/drm/amd/amdgpu/kv_dpm.c stable_p_state_sclk = table->entries[0].clk; table 2259 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table && table->count) { table 2265 drivers/gpu/drm/amd/amdgpu/kv_dpm.c ps->levels[i].sclk = table->entries[limit].clk; table 2269 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 2277 drivers/gpu/drm/amd/amdgpu/kv_dpm.c ps->levels[i].sclk = table->entries[limit].sclk_frequency; table 2418 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct amdgpu_clock_voltage_dependency_table *table = table 2421 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table && table->count) { table 2425 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = 0; i < table->count; i++) { table 2428 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) table 2431 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_set_divider_value(adev, i, table->entries[i].clk); table 2434 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].v); table 2441 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 2445 drivers/gpu/drm/amd/amdgpu/kv_dpm.c for (i = 0; i < table->num_max_dpm_entries; i++) { table 2448 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) table 2451 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); table 2452 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_set_vid(adev, i, table->entries[i].vid_2bit); table 1839 
drivers/gpu/drm/amd/amdgpu/si_dpm.c const struct atom_voltage_table *table, table 2636 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct amdgpu_cac_leakage_table *table = table 2641 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table == NULL) table 2647 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->count; i++) { table 2648 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table->entries[i].vddc > *max) table 2649 drivers/gpu/drm/amd/amdgpu/si_dpm.c *max = table->entries[i].vddc; table 2650 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table->entries[i].vddc < *min) table 2651 drivers/gpu/drm/amd/amdgpu/si_dpm.c *min = table->entries[i].vddc; table 3040 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct amdgpu_vce_clock_voltage_dependency_table *table = table 3044 drivers/gpu/drm/amd/amdgpu/si_dpm.c (table && (table->count == 0))) { table 3049 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->count; i++) { table 3050 drivers/gpu/drm/amd/amdgpu/si_dpm.c if ((evclk <= table->entries[i].evclk) && table 3051 drivers/gpu/drm/amd/amdgpu/si_dpm.c (ecclk <= table->entries[i].ecclk)) { table 3052 drivers/gpu/drm/amd/amdgpu/si_dpm.c *voltage = table->entries[i].v; table 3060 drivers/gpu/drm/amd/amdgpu/si_dpm.c *voltage = table->entries[table->count - 1].v; table 3208 drivers/gpu/drm/amd/amdgpu/si_dpm.c static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage) table 3212 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->count; i++) table 3213 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (voltage <= table->entries[i].value) table 3214 drivers/gpu/drm/amd/amdgpu/si_dpm.c return table->entries[i].value; table 3216 drivers/gpu/drm/amd/amdgpu/si_dpm.c return table->entries[table->count - 1].value; table 3250 drivers/gpu/drm/amd/amdgpu/si_dpm.c static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table, table 3255 drivers/gpu/drm/amd/amdgpu/si_dpm.c if ((table == NULL) || (table->count == 0)) { table 3260 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->count; i++) { table 3261 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (clock < table->entries[i].clk) table 3262 drivers/gpu/drm/amd/amdgpu/si_dpm.c clock = table->entries[i].clk; table 3267 drivers/gpu/drm/amd/amdgpu/si_dpm.c static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table, table 3272 drivers/gpu/drm/amd/amdgpu/si_dpm.c if ((table == NULL) || (table->count == 0)) table 3275 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i= 0; i < table->count; i++) { table 3276 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (clock <= table->entries[i].clk) { table 3277 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (*voltage < table->entries[i].v) table 3278 drivers/gpu/drm/amd/amdgpu/si_dpm.c *voltage = (u16)((table->entries[i].v < max_voltage) ? 
table 3279 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[i].v : max_voltage); table 4356 drivers/gpu/drm/amd/amdgpu/si_dpm.c const struct atom_voltage_table *table, table 4361 drivers/gpu/drm/amd/amdgpu/si_dpm.c if ((table == NULL) || (limits == NULL)) table 4364 drivers/gpu/drm/amd/amdgpu/si_dpm.c data = table->mask_low; table 4373 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table->count != num_levels) table 4502 drivers/gpu/drm/amd/amdgpu/si_dpm.c SISLANDS_SMC_STATETABLE *table) table 4507 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); table 4511 drivers/gpu/drm/amd/amdgpu/si_dpm.c SISLANDS_SMC_STATETABLE *table) table 4527 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table); table 4528 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = table 4533 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->maxVDDCIndexInPPTable = i; table 4540 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table); table 4542 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = table 4548 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table); table 4550 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = table 4557 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table); table 4559 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] = table 4574 drivers/gpu/drm/amd/amdgpu/si_dpm.c const struct atom_voltage_table *table, table 4579 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->count; i++) { table 4580 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (value <= table->entries[i].value) { table 4582 drivers/gpu/drm/amd/amdgpu/si_dpm.c voltage->value = cpu_to_be16(table->entries[i].value); table 4587 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (i >= table->count) table 4832 drivers/gpu/drm/amd/amdgpu/si_dpm.c SISLANDS_SMC_STATETABLE *table) table 4841 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.vDLL_CNTL = table 4843 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL = table 4845 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = table 4847 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = table 4849 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL = table 4851 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = table 4853 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = table 4855 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.vMPLL_SS = table 4857 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.vMPLL_SS2 = table 4860 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mclk.mclk_value = table 4863 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = table 4865 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = table 4867 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = table 4869 
drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = table 4871 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = table 4873 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = table 4876 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].sclk.sclk_value = table 4879 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].arbRefreshState = table 4882 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].ACIndex = 0; table 4886 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->initialState.levels[0].vddc); table 4892 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->initialState.levels[0].vddc, table 4896 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].vddc.index, table 4897 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->initialState.levels[0].std_vddc); table 4904 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->initialState.levels[0].vddci); table 4912 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->initialState.levels[0].vddc); table 4914 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd); table 4917 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].aT = cpu_to_be32(reg); table 4918 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); table 4919 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen; table 4922 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].strobeMode = table 4927 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG; table 4929 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].mcFlags = 0; table 4932 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levelCount = 1; table 4934 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; table 4936 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].dpm2.MaxPS = 0; table 4937 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].dpm2.NearTDPDec = 0; table 4938 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].dpm2.AboveSafeInc = 0; table 4939 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].dpm2.BelowSafeInc = 0; table 4940 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0; table 4943 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg); table 4946 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); table 4952 drivers/gpu/drm/amd/amdgpu/si_dpm.c SISLANDS_SMC_STATETABLE *table) table 4971 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState = table->initialState; table 4973 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; table 4977 drivers/gpu/drm/amd/amdgpu/si_dpm.c pi->acpi_vddc, &table->ACPIState.levels[0].vddc); table 4982 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->ACPIState.levels[0].vddc, &std_vddc); table 4985 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].vddc.index, table 4986 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->ACPIState.levels[0].std_vddc); table 4988 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen; table 4996 
drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->ACPIState.levels[0].vddc); table 5000 drivers/gpu/drm/amd/amdgpu/si_dpm.c pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc); table 5005 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->ACPIState.levels[0].vddc, &std_vddc); table 5009 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].vddc.index, table 5010 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->ACPIState.levels[0].std_vddc); table 5012 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].gen2PCIE = table 5024 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->ACPIState.levels[0].vddc); table 5031 drivers/gpu/drm/amd/amdgpu/si_dpm.c &table->ACPIState.levels[0].vddci); table 5042 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.vDLL_CNTL = table 5044 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = table 5046 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = table 5048 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = table 5050 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL = table 5052 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = table 5054 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = table 5056 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_SS = table 5058 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_SS2 = table 5061 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = table 5063 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = table 5065 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = table 5067 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = table 5070 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].mclk.mclk_value = 0; table 5071 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].sclk.sclk_value = 0; table 5073 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd); table 5076 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].ACIndex = 0; table 5078 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].dpm2.MaxPS = 0; table 5079 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].dpm2.NearTDPDec = 0; table 5080 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].dpm2.AboveSafeInc = 0; table 5081 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].dpm2.BelowSafeInc = 0; table 5082 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0; table 5085 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg); table 5088 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); table 5162 drivers/gpu/drm/amd/amdgpu/si_dpm.c SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable; table 5167 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_populate_smc_voltage_tables(adev, table); table 5172 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; table 5175 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; table 5178 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->thermalProtectType = 
PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; table 5183 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 5187 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; table 5191 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 5194 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 5197 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH; table 5200 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO; table 5206 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table); table 5210 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = si_populate_smc_acpi_state(adev, table); table 5214 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->driverState = table->initialState; table 5222 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = si_populate_ulv_state(adev, &table->ULVState); table 5236 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->ULVState = table->initialState; table 5240 drivers/gpu/drm/amd/amdgpu/si_dpm.c (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE), table 5817 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct si_mc_reg_table *table) table 5822 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0, j = table->last; i < table->last; i++) { table 5825 drivers/gpu/drm/amd/amdgpu/si_dpm.c switch (table->mc_reg_address[i].s1) { table 5828 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS; table 5829 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP; table 5830 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (k = 0; k < table->num_entries; k++) table 5831 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 5833 drivers/gpu/drm/amd/amdgpu/si_dpm.c ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); table 5839 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS; table 5840 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP; table 5841 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (k = 0; k < table->num_entries; k++) { table 5842 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 5844 drivers/gpu/drm/amd/amdgpu/si_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 5846 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] |= 0x100; table 5853 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD; table 5854 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD; table 5855 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (k = 0; k < table->num_entries; k++) table 5856 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 5857 drivers/gpu/drm/amd/amdgpu/si_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; table 5863 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1; table 5864 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP; table 5865 drivers/gpu/drm/amd/amdgpu/si_dpm.c for(k = 0; k < table->num_entries; k++) table 5866 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 5868 drivers/gpu/drm/amd/amdgpu/si_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 5876 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->last = j; 
table 5935 drivers/gpu/drm/amd/amdgpu/si_dpm.c static void si_set_valid_flag(struct si_mc_reg_table *table) table 5939 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->last; i++) { table 5940 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (j = 1; j < table->num_entries; j++) { table 5941 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) { table 5942 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->valid_flag |= 1 << i; table 5949 drivers/gpu/drm/amd/amdgpu/si_dpm.c static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table) table 5954 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->last; i++) table 5955 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? table 5956 drivers/gpu/drm/amd/amdgpu/si_dpm.c address : table->mc_reg_address[i].s1; table 5960 drivers/gpu/drm/amd/amdgpu/si_dpm.c static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, table 5965 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) table 5967 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table->num_entries > MAX_AC_TIMING_ENTRIES) table 5970 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->last; i++) table 5971 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; table 5972 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_table->last = table->last; table 5974 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->num_entries; i++) { table 5976 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_table_entry[i].mclk_max; table 5977 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (j = 0; j < table->last; j++) { table 5979 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->mc_reg_table_entry[i].mc_data[j]; table 5982 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_table->num_entries = table->num_entries; table 5990 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct atom_mc_reg_table *table; table 5995 drivers/gpu/drm/amd/amdgpu/si_dpm.c table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); table 5996 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (!table) table 6014 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); table 6018 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = si_copy_vbios_mc_reg_table(table, si_table); table 6031 drivers/gpu/drm/amd/amdgpu/si_dpm.c kfree(table); table 6323 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct amdgpu_clock_voltage_dependency_table *table) table 6329 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table) { table 6330 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (i = 0; i < table->count; i++) { table 6332 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[i].v, table 6335 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[i].v = leakage_voltage; table 6345 drivers/gpu/drm/amd/amdgpu/si_dpm.c for (j = (table->count - 2); j >= 0; j--) { table 6346 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ? 
table 6347 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[j].v : table->entries[j + 1].v; table 709 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c struct amdgpu_mm_table *table) table 712 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c uint64_t addr = table->gpu_addr; table 713 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr; table 153 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c struct amdgpu_mm_table *table) table 156 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c uint64_t addr = table->gpu_addr; table 157 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr; table 57 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c #define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table) table 137 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c ATOM_OBJECT_TABLE *table; table 141 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset); table 143 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c if (!table) table 146 drivers/gpu/drm/amd/display/dc/bios/bios_parser.c return table->ucNumberOfObjects; table 112 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table) table 1229 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c const struct link_mst_stream_allocation_table *table) table 1248 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c if (table->stream_count >= 1) { table 1250 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c &table->stream_allocations[0], table 1262 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c if (table->stream_count >= 2) { table 1264 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c &table->stream_allocations[1], table 1276 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c if (table->stream_count >= 3) { table 1278 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c &table->stream_allocations[2], table 1290 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c if (table->stream_count >= 4) { table 1292 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c &table->stream_allocations[3], table 251 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h const struct link_mst_stream_allocation_table *table); table 1195 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c const struct link_mst_stream_allocation_table *table) table 1215 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c if (table->stream_count >= 1) { table 1217 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c &table->stream_allocations[0], table 1229 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c if (table->stream_count >= 2) { table 1231 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c &table->stream_allocations[1], table 1243 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c if (table->stream_count >= 3) { table 1245 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c &table->stream_allocations[2], table 1257 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c if (table->stream_count >= 4) { table 1259 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c &table->stream_allocations[3], table 491 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h const struct link_mst_stream_allocation_table *table); table 39 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c table = qp_table_##mode##_##bpc##bpc_##max; \ table 50 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c const struct 
qp_entry *table = 0L; table 75 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c if (table == 0) table 78 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index = (bpp - table[0].bpp) * 2; table 86 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c memcpy(qps, table[index].qps, sizeof(qp_set)); table 163 drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h const struct link_mst_stream_allocation_table *table); table 74 drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c const struct link_mst_stream_allocation_table *table) {} table 208 drivers/gpu/drm/amd/display/modules/power/power_helpers.c struct iram_table_v_2 *table) table 214 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_thresholds[0] = 0; table 215 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_offsets[0] = params.backlight_lut_array[0]; table 216 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_thresholds[num_entries-1] = 0xFFFF; table 217 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_offsets[num_entries-1] = table 232 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_thresholds[i] = table 234 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_offsets[i] = table 240 drivers/gpu/drm/amd/display/modules/power/power_helpers.c struct iram_table_v_2_2 *table) table 246 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_thresholds[0] = 0; table 247 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_offsets[0] = params.backlight_lut_array[0]; table 248 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_thresholds[num_entries-1] = 0xFFFF; table 249 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_offsets[num_entries-1] = table 264 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_thresholds[i] = table 266 drivers/gpu/drm/amd/display/modules/power/power_helpers.c table->backlight_offsets[i] = table 259 drivers/gpu/drm/amd/include/kgd_pp_interface.h int (*get_pp_table)(void *handle, char **table); table 645 drivers/gpu/drm/amd/powerplay/amd_powerplay.c static int pp_dpm_get_pp_table(void *handle, char **table) table 654 drivers/gpu/drm/amd/powerplay/amd_powerplay.c *table = (char *)hwmgr->soft_pp_table; table 438 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c struct smu_table *table = NULL; table 445 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c table = &smu_table->tables[table_index]; table 448 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c memcpy(table->cpu_addr, table_data, table->size); table 451 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c upper_32_bits(table->mc_address)); table 455 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c lower_32_bits(table->mc_address)); table 469 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c memcpy(table_data, table->cpu_addr, table->size); table 495 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c int smu_sys_get_pp_table(struct smu_context *smu, void **table) table 503 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c *table = smu_table->hardcode_pptable; table 505 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c *table = smu_table->power_play_table; table 761 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, table 768 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table, table 1145 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c static uint32_t 
arcturus_find_lowest_dpm_level(struct arcturus_single_dpm_table *table) table 1149 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c for (i = 0; i < table->count; i++) { table 1150 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (table->dpm_levels[i].enabled) table 1153 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (i >= table->count) { table 1155 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c table->dpm_levels[i].enabled = true; table 1161 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c static uint32_t arcturus_find_highest_dpm_level(struct arcturus_single_dpm_table *table) table 1165 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (table->count <= 0) { table 1169 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (table->count > MAX_DPM_NUMBER) { table 1174 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c for (i = table->count - 1; i >= 0; i--) { table 1175 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (table->dpm_levels[i].enabled) table 1180 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c table->dpm_levels[i].enabled = true; table 50 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c pp_atomctrl_mc_reg_table *table) table 64 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->mc_reg_table_entry[num_ranges].mclk_max = table 68 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c for (i = 0, j = 1; i < table->last; i++) { table 69 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c if ((table->mc_reg_address[i].uc_pre_reg_data & table 71 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->mc_reg_table_entry[num_ranges].mc_data[i] = table 74 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c } else if ((table->mc_reg_address[i].uc_pre_reg_data & table 76 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->mc_reg_table_entry[num_ranges].mc_data[i] = table 77 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->mc_reg_table_entry[num_ranges].mc_data[i-1]; table 89 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->num_entries = num_ranges; table 103 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c pp_atomctrl_mc_reg_table *table) table 118 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->mc_reg_address[i].s1 = table 120 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->mc_reg_address[i].uc_pre_reg_data = table 128 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->last = i; table 135 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c pp_atomctrl_mc_reg_table *table) table 158 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c result = atomctrl_set_mc_reg_address_table(reg_block, table); table 163 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c reg_block, table); table 1199 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c ATOM_ASIC_INTERNAL_SS_INFO *table = NULL; table 1203 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table = (ATOM_ASIC_INTERNAL_SS_INFO *) table 1208 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c return table; table 1219 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c ATOM_ASIC_INTERNAL_SS_INFO *table; table 1225 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table = asic_internal_ss_get_ss_table(hwmgr->adev); table 1227 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c if (NULL == table) table 1230 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c ssInfo = &table->asSpreadSpectrum[0]; table 1232 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c while (((uint8_t *)ssInfo - (uint8_t *)table) < table 1233 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c le16_to_cpu(table->sHeader.usStructureSize)) { table 1249 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c if 
(((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) && table 1250 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) || table 1251 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c (GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) { table 1360 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table) table 1374 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting; table 1375 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv; table 1376 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->entry[i].usFcw_pcc = table 1378 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->entry[i].usFcw_trans_upper = table 1380 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c table->entry[i].usRcw_trans_lower = table 296 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h extern int atomctrl_initialize_mc_reg_table(struct pp_hwmgr *hwmgr, uint8_t module_index, pp_atomctrl_mc_reg_table *table); table 320 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table); table 161 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c phm_ppt_v1_voltage_lookup_table *table; table 171 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c table = kzalloc(table_size, GFP_KERNEL); table 173 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c if (NULL == table) table 176 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c table->count = vddc_lookup_pp_tables->ucNumEntries; table 181 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, table, i); table 192 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c *lookup_table = table; table 322 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c struct phm_clock_array *table; table 331 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c table = kzalloc(table_size, GFP_KERNEL); table 333 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c if (NULL == table) table 336 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c table->count = (uint32_t)clk_volt_pp_table->count; table 338 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c for (i = 0; i < table->count; i++) { table 342 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c table->values[i] = (uint32_t)dep_record->clk; table 344 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c *clk_table = table; table 590 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c const PPTable_Generic_SubTable_Header * table table 609 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c if (table->ucRevId < 3) { table 611 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c (ATOM_Tonga_PowerTune_Table *)table; table 638 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c (ATOM_Fiji_PowerTune_Table *)table; table 312 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PowerTune_Table *table, table 324 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c tdp_table->usTDP = le16_to_cpu(table->usTDP); table 325 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c tdp_table->usConfigurableTDP = le16_to_cpu(table->usConfigurableTDP); table 326 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c tdp_table->usTDC = 
le16_to_cpu(table->usTDC); table 327 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c tdp_table->usBatteryPowerLimit = le16_to_cpu(table->usBatteryPowerLimit); table 328 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c tdp_table->usSmallPowerLimit = le16_to_cpu(table->usSmallPowerLimit); table 329 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c tdp_table->usLowCACLeakage = le16_to_cpu(table->usLowCACLeakage); table 330 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c tdp_table->usHighCACLeakage = le16_to_cpu(table->usHighCACLeakage); table 377 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_Clock_Voltage_Dependency_Table *table) table 385 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c * table->ucNumEntries; table 391 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c dep_table->count = (unsigned long)table->ucNumEntries; table 395 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c ((unsigned long)table->entries[i].ucClockHigh << 16) | table 396 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c le16_to_cpu(table->entries[i].usClockLow); table 398 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c (unsigned long)le16_to_cpu(table->entries[i].usVoltage); table 408 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const struct phm_clock_voltage_dependency_table *table) table 413 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table_size = sizeof(unsigned long) + sizeof(unsigned long) * table->count; table 418 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c clock_table->count = (unsigned long)table->count; table 421 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c clock_table->values[i] = (unsigned long)table->entries[i].clk; table 430 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_Clock_Voltage_Limit_Table *table) table 432 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c limits->sclk = ((unsigned long)table->entries[0].ucSclkHigh << 16) | table 433 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c le16_to_cpu(table->entries[0].usSclkLow); table 434 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c limits->mclk = ((unsigned long)table->entries[0].ucMclkHigh << 16) | table 435 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c le16_to_cpu(table->entries[0].usMclkLow); table 436 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c limits->vddc = (unsigned long)le16_to_cpu(table->entries[0].usVddc); table 437 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c limits->vddci = (unsigned long)le16_to_cpu(table->entries[0].usVddci); table 1109 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *table, table 1117 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->numEntries; table 1123 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c uvd_table->count = table->numEntries; table 1125 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c for (i = 0; i < table->numEntries; i++) { table 1127 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c &array->entries[table->entries[i].ucUVDClockInfoIndex]; table 1128 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c uvd_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage); table 1142 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *table, table 1150 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c * table->numEntries; table 1156 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c 
vce_table->count = table->numEntries; table 1157 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c for (i = 0; i < table->numEntries; i++) { table 1158 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const VCEClockInfo *entry = &array->entries[table->entries[i].ucVCEClockInfoIndex]; table 1160 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c vce_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage); table 1174 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_SAMClk_Voltage_Limit_Table *table) table 1181 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->numEntries; table 1187 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c samu_table->count = table->numEntries; table 1189 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c for (i = 0; i < table->numEntries; i++) { table 1190 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c samu_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage); table 1191 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c samu_table->entries[i].samclk = ((unsigned long)table->entries[i].ucSAMClockHigh << 16) table 1192 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | le16_to_cpu(table->entries[i].usSAMClockLow); table 1202 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_ACPClk_Voltage_Limit_Table *table) table 1209 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->numEntries; table 1215 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c acp_table->count = (unsigned long)table->numEntries; table 1217 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c for (i = 0; i < table->numEntries; i++) { table 1218 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c acp_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage); table 1219 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c acp_table->entries[i].acpclk = ((unsigned long)table->entries[i].ucACPClockHigh << 16) table 1220 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | le16_to_cpu(table->entries[i].usACPClockLow); table 1231 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c ATOM_PPLIB_Clock_Voltage_Dependency_Table *table; table 1259 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *table = table 1264 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table, array); table 1331 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) table 1335 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c &hwmgr->dyn_state.vddc_dependency_on_sclk, table); table 1339 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) table 1343 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c &hwmgr->dyn_state.vddci_dependency_on_mclk, table); table 1347 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) table 1351 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c &hwmgr->dyn_state.vddc_dependency_on_mclk, table); table 1374 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) table 1378 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c &hwmgr->dyn_state.mvdd_dependency_on_mclk, table); table 1386 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) table 1389 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c 
&hwmgr->dyn_state.vdd_gfx_dependency_on_sclk, table); table 1397 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_CAC_Leakage_Table *table) table 1402 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c if (hwmgr == NULL || table == NULL || ptable == NULL) table 1406 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c (sizeof(struct phm_cac_leakage_table) * table->ucNumEntries); table 1413 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->count = (ULONG)table->ucNumEntries; table 1418 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Vddc1 = le16_to_cpu(table->entries[i].usVddc1); table 1419 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Vddc2 = le16_to_cpu(table->entries[i].usVddc2); table 1420 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Vddc3 = le16_to_cpu(table->entries[i].usVddc3); table 1422 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Vddc = le16_to_cpu(table->entries[i].usVddc); table 1423 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Leakage = le32_to_cpu(table->entries[i].ulLeakageValue); table 1542 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c struct phm_phase_shedding_limits_table *table; table 1550 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table = kzalloc(size, GFP_KERNEL); table 1552 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c if (table == NULL) table 1555 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->count = (unsigned long)ptable->ucNumEntries; table 1557 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c for (i = 0; i < table->count; i++) { table 1558 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->entries[i].Voltage = (unsigned long)le16_to_cpu(ptable->entries[i].usVoltage); table 1559 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->entries[i].Sclk = ((unsigned long)ptable->entries[i].ucSclkHigh << 16) table 1561 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->entries[i].Mclk = ((unsigned long)ptable->entries[i].ucMclkHigh << 16) table 1564 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c hwmgr->dyn_state.vddc_phase_shed_limits_table = table; table 1574 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_POWERPLAYTABLE *table = table 1577 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c get_vce_state_table(hwmgr, table); table 130 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c struct phm_clock_and_voltage_limits *table) table 441 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c DpmClocks_t *table = &(smu10_data->clock_table); table 444 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true); table 450 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c if (0 == result && table->DcefClocks[0].Freq != 0) { table 1158 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c Watermarks_t *table = &(data->water_marks_table); table 1161 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges); table 1162 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false); table 103 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_clock_voltage_dependency_table *table = table 109 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c for (i = 
0; i < (int)table->count; i++) { table 110 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (clock <= table->entries[i].clk) table 117 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c for (i = table->count - 1; i >= 0; i--) { table 118 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (clock >= table->entries[i].clk) table 255 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_clock_and_voltage_limits *table) table 263 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table->sclk = dep_table->entries[dep_table->count-1].clk; table 264 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr, table 267 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table->mclk = sys_info->nbp_memory_clock[0]; table 438 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c void *table = NULL; table 455 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c ret = smum_download_powerplay_table(hwmgr, &table); table 457 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c PP_ASSERT_WITH_CODE((0 == ret && NULL != table), table 460 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock_table = (struct SMU8_Fusion_ClkTable *)table; table 555 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_clock_voltage_dependency_table *table = table 559 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (NULL == table || table->count <= 0) table 562 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_min_clk = table->entries[0].clk; table 563 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.hard_min_clk = table->entries[0].clk; table 567 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (level < table->count) table 568 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].clk; table 570 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].clk; table 581 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_uvd_clock_voltage_dependency_table *table = table 585 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (NULL == table || table->count <= 0) table 594 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (level < table->count) table 595 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].vclk; table 597 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].vclk; table 608 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_vce_clock_voltage_dependency_table *table = table 612 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (NULL == table || table->count <= 0) table 621 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (level < table->count) table 622 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].ecclk; table 624 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].ecclk; table 635 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_acp_clock_voltage_dependency_table *table = table 639 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (NULL == table || table->count <= 0) table 648 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (level < table->count) table 649 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].acpclk; table 651 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].acpclk; table 684 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_clock_voltage_dependency_table *table = table 692 
drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_min_clk = table->entries[0].clk; table 695 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (level < table->count) table 696 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_max_clk = table->entries[level].clk; table 698 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; table 1144 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_clock_voltage_dependency_table *table = table 1148 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (NULL == table || table->count <= 0) table 1151 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_min_clk = table->entries[0].clk; table 1152 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.hard_min_clk = table->entries[0].clk; table 1153 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c hwmgr->pstate_sclk = table->entries[0].clk; table 1158 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (level < table->count) table 1159 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].clk; table 1161 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].clk; table 1343 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_clock_voltage_dependency_table *table = table 1350 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk; table 1351 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v; table 1476 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c const struct phm_clock_voltage_dependency_table *table = table 1484 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c for (i = table->count - 1; i > 0; i--) { table 1485 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (limits->vddc >= table->entries[i].v) { table 1486 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c info->level = table->entries[i].clk; table 1606 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_clock_voltage_dependency_table *table; table 1615 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table = hwmgr->dyn_state.vddc_dependency_on_sclk; table 1617 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clocks->clock[i] = table->entries[i].clk * 10; table 1633 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_clock_voltage_dependency_table *table = table 1639 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if ((NULL == table) || (table->count <= 0) || (clocks == NULL)) table 1644 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (level < table->count) table 1645 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clocks->engine_max_clock = table->entries[level].clk; table 1647 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clocks->engine_max_clock = table->entries[table->count - 1].clk; table 1674 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c struct phm_clock_voltage_dependency_table *table = table 1702 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c sclk = table->entries[sclk_index].clk; table 52 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c uint32_t *table; table 55 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table = kzalloc(array_size, GFP_KERNEL); table 56 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (NULL == table) table 60 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table[i] = le32_to_cpu(pptable_array[i]); table 62 
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c *pptable_info_array = table; table 74 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c uint32_t *table; table 77 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table = kzalloc(array_size, GFP_KERNEL); table 78 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (NULL == table) table 82 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table[i] = le32_to_cpu(pptable_array[i]); table 84 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c *pptable_info_array = table; table 208 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c struct pp_atomctrl_voltage_table *table; table 213 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table = kzalloc(sizeof(struct pp_atomctrl_voltage_table), table 216 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (NULL == table) table 219 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->mask_low = vol_table->mask_low; table 220 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->phase_delay = vol_table->phase_delay; table 226 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c for (j = 0; j < table->count; j++) { table 227 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (vvalue == table->entries[j].value) { table 234 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->entries[table->count].value = vvalue; table 235 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->entries[table->count].smio_low = table 237 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->count++; table 241 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table)); table 242 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c kfree(table); table 243 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table = NULL; table 345 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c int phm_reset_single_dpm_table(void *table, table 350 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; table 361 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c void *table, table 365 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; table 371 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c int32_t phm_get_dpm_level_enable_mask_value(void *table) table 375 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; table 442 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c int phm_find_boot_level(void *table, table 447 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; table 537 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c struct phm_clock_voltage_dependency_table *table = table 543 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (!table || table->count <= 0 table 548 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c for (i = 0; i < table->count; i++) { table 549 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (dal_power_level == table->entries[i].clk) { table 550 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c req_vddc = table->entries[i].v; table 663 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, table 670 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c adev->mode_info.atom_context, table, size, table 708 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c struct watermarks *table = wt_table; table 710 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (!table || 
!wm_with_clock_ranges) table 717 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[1][i].MinClock = table 721 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[1][i].MaxClock = table 725 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[1][i].MinUclk = table 729 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[1][i].MaxUclk = table 733 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[1][i].WmSetting = (uint8_t) table 738 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[0][i].MinClock = table 742 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[0][i].MaxClock = table 746 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[0][i].MinUclk = table 750 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[0][i].MaxUclk = table 754 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->WatermarkRow[0][i].WmSetting = (uint8_t) table 80 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max); table 81 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes); table 82 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h extern int32_t phm_get_dpm_level_enable_mask_value(void *table); table 87 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level); table 114 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, table 955 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c struct pp_atomfwctrl_voltage_table table; table 962 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c VOLTAGE_OBJ_GPIO_LUT, &table); table 965 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c tmp = table.mask_low; table 1011 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c struct pp_atomfwctrl_voltage_table *table; table 1015 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table), table 1018 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (!table) table 1021 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table->mask_low = vol_table->mask_low; table 1022 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table->phase_delay = vol_table->phase_delay; table 1028 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c for (j = 0; j < table->count; j++) { table 1029 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (vvalue == table->entries[j].value) { table 1036 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table->entries[table->count].value = vvalue; table 1037 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table->entries[table->count].smio_low = table 1039 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table->count++; table 1043 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table)); table 1044 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c kfree(table); table 3440 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c struct vega10_single_dpm_table *table) table 3444 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c for (i = 0; i < table->count; i++) { table 3445 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (table->dpm_levels[i].enabled) table 3453 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c struct 
vega10_single_dpm_table *table) table 3457 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (table->count <= MAX_REGULAR_DPM_NUMBER) { table 3458 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c for (i = table->count; i > 0; i--) { table 3459 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (table->dpm_levels[i - 1].enabled) table 4357 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c Watermarks_t *table = &(data->smc_state_table.water_marks_table); table 4361 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); table 1291 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c PPTable_t *table = &(data->smc_state_table.pp_table); table 1293 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->SocketPowerLimit = cpu_to_le16( table 1295 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->TdcLimit = cpu_to_le16(tdp_table->usTDC); table 1296 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->EdcLimit = cpu_to_le16(tdp_table->usEDCLimit); table 1297 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->TedgeLimit = cpu_to_le16(tdp_table->usTemperatureLimitTedge); table 1298 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->ThotspotLimit = cpu_to_le16(tdp_table->usTemperatureLimitHotspot); table 1299 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->ThbmLimit = cpu_to_le16(tdp_table->usTemperatureLimitHBM); table 1300 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Tvr_socLimit = cpu_to_le16(tdp_table->usTemperatureLimitVrVddc); table 1301 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Tvr_memLimit = cpu_to_le16(tdp_table->usTemperatureLimitVrMvdd); table 1302 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Tliquid1Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid1); table 1303 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Tliquid2Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid2); table 1304 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->TplxLimit = cpu_to_le16(tdp_table->usTemperatureLimitPlx); table 1305 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->LoadLineResistance = table 1307 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->FitLimit = 0; /* Not used for Vega10 */ table 1309 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Liquid1_I2C_address = tdp_table->ucLiquid1_I2C_address; table 1310 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Liquid2_I2C_address = tdp_table->ucLiquid2_I2C_address; table 1311 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Vr_I2C_address = tdp_table->ucVr_I2C_address; table 1312 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Plx_I2C_address = tdp_table->ucPlx_I2C_address; table 1314 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Liquid_I2C_LineSCL = tdp_table->ucLiquid_I2C_Line; table 1315 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Liquid_I2C_LineSDA = tdp_table->ucLiquid_I2C_LineSDA; table 1317 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Vr_I2C_LineSCL = tdp_table->ucVr_I2C_Line; table 1318 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Vr_I2C_LineSDA = tdp_table->ucVr_I2C_LineSDA; table 1320 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Plx_I2C_LineSCL = tdp_table->ucPlx_I2C_Line; table 1321 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c table->Plx_I2C_LineSDA = 
tdp_table->ucPlx_I2C_LineSDA; table 424 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c const Vega10_PPTable_Generic_SubTable_Header *table) table 441 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c if (table->ucRevId == 5) { table 442 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c power_tune_table = (ATOM_Vega10_PowerTune_Table *)table; table 475 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c } else if (table->ucRevId == 6) { table 476 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table; table 522 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c power_tune_table_v3 = (ATOM_Vega10_PowerTune_Table_V3 *)table; table 808 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c const Vega10_PPTable_Generic_SubTable_Header *table) table 815 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c (ATOM_Vega10_PCIE_Table *)table; table 879 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c struct phm_clock_array *table; table 887 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c table = kzalloc(table_size, GFP_KERNEL); table 889 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c if (!table) table 892 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c table->count = (uint32_t)clk_volt_pp_table->count; table 894 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c for (i = 0; i < table->count; i++) table 895 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk; table 897 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c *clk_table = table; table 1070 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c phm_ppt_v1_voltage_lookup_table *table; table 1078 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c table = kzalloc(table_size, GFP_KERNEL); table 1080 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c if (table == NULL) table 1083 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c table->count = vddc_lookup_pp_tables->ucNumEntries; table 1086 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c table->entries[i].us_vdd = table 1089 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c *lookup_table = table; table 507 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c PPTable_t *table = &(data->smc_state_table.pp_table); table 512 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanMaximumRpm = (uint16_t)hwmgr->thermal_controller. table 514 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanThrottlingRpm = hwmgr->thermal_controller. table 516 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanAcousticLimitRpm = (uint16_t)(hwmgr->thermal_controller. table 518 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanTargetTemperature = hwmgr->thermal_controller. table 523 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c (uint32_t)table->FanTargetTemperature); table 525 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanPwmMin = hwmgr->thermal_controller. table 527 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanTargetGfxclk = (uint16_t)(hwmgr->thermal_controller. table 529 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanGainEdge = hwmgr->thermal_controller. table 531 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanGainHotspot = hwmgr->thermal_controller. 
table 533 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanGainLiquid = hwmgr->thermal_controller. table 535 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanGainVrVddc = hwmgr->thermal_controller. table 537 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanGainVrMvdd = hwmgr->thermal_controller. table 539 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanGainPlx = hwmgr->thermal_controller. table 541 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanGainHbm = hwmgr->thermal_controller. table 543 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanZeroRpmEnable = hwmgr->thermal_controller. table 545 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanStopTemp = hwmgr->thermal_controller. table 547 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanStartTemp = hwmgr->thermal_controller. table 562 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c PPTable_t *table = &(data->smc_state_table.pp_table); table 572 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c table->FanThrottlingRpm = hwmgr->thermal_controller. table 1000 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c struct vega12_single_dpm_table *table) table 1004 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c for (i = 0; i < table->count; i++) { table 1005 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c if (table->dpm_levels[i].enabled) table 1009 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c if (i >= table->count) { table 1011 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c table->dpm_levels[i].enabled = true; table 1018 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c struct vega12_single_dpm_table *table) table 1021 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER, table 1025 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c for (i = table->count - 1; i >= 0; i--) { table 1026 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c if (table->dpm_levels[i].enabled) table 1032 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c table->dpm_levels[i].enabled = true; table 1866 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c Watermarks_t *table = &(data->smc_state_table.water_marks_table); table 1872 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); table 258 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c PPTable_t *table = &(data->smc_state_table.pp_table); table 262 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c (uint32_t)table->FanTargetTemperature); table 1750 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c struct vega20_single_dpm_table *table) table 1754 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c for (i = 0; i < table->count; i++) { table 1755 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c if (table->dpm_levels[i].enabled) table 1758 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c if (i >= table->count) { table 1760 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c table->dpm_levels[i].enabled = true; table 1767 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c struct vega20_single_dpm_table *table) table 1771 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c PP_ASSERT_WITH_CODE(table != NULL, table 1774 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c PP_ASSERT_WITH_CODE(table->count > 0, table 1777 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER, table 1781 
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c for (i = table->count - 1; i >= 0; i--) { table 1782 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c if (table->dpm_levels[i].enabled) table 1787 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c table->dpm_levels[i].enabled = true; table 2891 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c Watermarks_t *table = &(data->smc_state_table.water_marks_table); table 2897 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); table 693 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c uint8_t *table; table 697 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c table = kzalloc(array_size, GFP_KERNEL); table 698 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c if (NULL == table) table 702 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c table[i] = le32_to_cpu(pptable_array[i]); table 703 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c if (table[i]) table 707 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c *pptable_info_array = table; table 328 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c PPTable_t *table = &(data->smc_state_table.pp_table); table 332 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c (uint32_t)table->FanTargetTemperature); table 771 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, table 799 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h int smu_sys_get_pp_table(struct smu_context *smu, void **table); table 215 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h void **table); table 231 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */ table 86 drivers/gpu/drm/amd/powerplay/inc/smumgr.h extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); table 115 drivers/gpu/drm/amd/powerplay/inc/smumgr.h extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); table 1298 drivers/gpu/drm/amd/powerplay/navi10_ppt.c Watermarks_t *table = watermarks; table 1300 drivers/gpu/drm/amd/powerplay/navi10_ppt.c if (!table || !clock_ranges) table 1308 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[1][i].MinClock = table 1312 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[1][i].MaxClock = table 1316 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[1][i].MinUclk = table 1320 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[1][i].MaxUclk = table 1324 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[1][i].WmSetting = (uint8_t) table 1329 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[0][i].MinClock = table 1333 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[0][i].MaxClock = table 1337 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[0][i].MinUclk = table 1341 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[0][i].MaxUclk = table 1345 drivers/gpu/drm/amd/powerplay/navi10_ppt.c table->WatermarkRow[0][i].WmSetting = (uint8_t) table 166 drivers/gpu/drm/amd/powerplay/renoir_ppt.c DpmClocks_t *table = smu->smu_table.clocks_table; table 168 drivers/gpu/drm/amd/powerplay/renoir_ppt.c if (!clock || !table) table 172 drivers/gpu/drm/amd/powerplay/renoir_ppt.c *clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq; table 174 
drivers/gpu/drm/amd/powerplay/renoir_ppt.c *clock = table->FClocks[0].Freq; table 33 drivers/gpu/drm/amd/powerplay/renoir_ppt.h #define GET_DPM_CUR_FREQ(table, clk_type, dpm_level, freq) \ table 37 drivers/gpu/drm/amd/powerplay/renoir_ppt.h freq = table->SocClocks[dpm_level].Freq; \ table 40 drivers/gpu/drm/amd/powerplay/renoir_ppt.h freq = table->FClocks[dpm_level].Freq; \ table 43 drivers/gpu/drm/amd/powerplay/renoir_ppt.h freq = table->DcfClocks[dpm_level].Freq; \ table 46 drivers/gpu/drm/amd/powerplay/renoir_ppt.h freq = table->FClocks[dpm_level].Freq; \ table 315 drivers/gpu/drm/amd/powerplay/smu_v11_0.c static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) table 325 drivers/gpu/drm/amd/powerplay/smu_v11_0.c *table = (uint8_t *)v2 + ppt_offset_bytes; table 330 drivers/gpu/drm/amd/powerplay/smu_v11_0.c static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, table 345 drivers/gpu/drm/amd/powerplay/smu_v11_0.c *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); table 365 drivers/gpu/drm/amd/powerplay/smu_v11_0.c void *table; table 374 drivers/gpu/drm/amd/powerplay/smu_v11_0.c ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size); table 377 drivers/gpu/drm/amd/powerplay/smu_v11_0.c ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size, table 392 drivers/gpu/drm/amd/powerplay/smu_v11_0.c (uint8_t **)&table); table 399 drivers/gpu/drm/amd/powerplay/smu_v11_0.c smu->smu_table.power_play_table = table; table 735 drivers/gpu/drm/amd/powerplay/smu_v11_0.c struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE]; table 740 drivers/gpu/drm/amd/powerplay/smu_v11_0.c table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL); table 778 drivers/gpu/drm/amd/powerplay/smu_v11_0.c struct smu_table *table = NULL; table 780 drivers/gpu/drm/amd/powerplay/smu_v11_0.c table = &smu_table->tables[SMU_TABLE_WATERMARKS]; table 782 drivers/gpu/drm/amd/powerplay/smu_v11_0.c if (!table->cpu_addr) table 785 drivers/gpu/drm/amd/powerplay/smu_v11_0.c ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, table 1342 drivers/gpu/drm/amd/powerplay/smu_v11_0.c void *table = watermarks->cpu_addr; table 1347 drivers/gpu/drm/amd/powerplay/smu_v11_0.c smu_set_watermarks_table(smu, table, clock_ranges); table 310 drivers/gpu/drm/amd/powerplay/smu_v12_0.c struct smu_table *table = NULL; table 312 drivers/gpu/drm/amd/powerplay/smu_v12_0.c table = &smu_table->tables[SMU_TABLE_DPMCLOCKS]; table 313 drivers/gpu/drm/amd/powerplay/smu_v12_0.c if (!table) table 316 drivers/gpu/drm/amd/powerplay/smu_v12_0.c if (!table->cpu_addr) table 840 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 846 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VddcLevelCount = data->vddc_voltage_table.count; table 847 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (count = 0; count < table->VddcLevelCount; count++) { table 850 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c &(table->VddcLevel[count])); table 855 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VddcLevel[count].Smio = (uint8_t) count; table 856 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low; table 857 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low; table 859 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VddcLevel[count].Smio = 0; table 863 
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); table 869 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 875 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VddciLevelCount = data->vddci_voltage_table.count; table 877 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (count = 0; count < table->VddciLevelCount; count++) { table 880 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c &(table->VddciLevel[count])); table 883 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VddciLevel[count].Smio = (uint8_t) count; table 884 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low; table 885 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low; table 887 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VddciLevel[count].Smio = 0; table 891 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); table 897 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 903 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MvddLevelCount = data->mvdd_voltage_table.count; table 905 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (count = 0; count < table->MvddLevelCount; count++) { table 908 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c &table->MvddLevel[count]); table 911 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MvddLevel[count].Smio = (uint8_t) count; table 912 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low; table 913 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low; table 915 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MvddLevel[count].Smio = 0; table 919 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); table 926 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 930 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_smc_vddc_table(hwmgr, table); table 934 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_smc_vdd_ci_table(hwmgr, table); table 938 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_smc_mvdd_table(hwmgr, table); table 995 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table) table 1004 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->LinkLevel[i].PcieGenSpeed = table 1006 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->LinkLevel[i].PcieLaneCount = table 1008 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->LinkLevel[i].EnabledForActivity = 1; table 1009 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5); table 1010 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30); table 1376 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 1390 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; table 1393 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); table 1395 
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE); table 1397 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1; table 1399 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); table 1403 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.SclkFrequency, &dividers); table 1409 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; table 1410 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; table 1411 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.DeepSleepDivId = 0; table 1420 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; table 1421 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; table 1422 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; table 1423 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; table 1424 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; table 1425 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; table 1426 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.CcPwrDynRm = 0; table 1427 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPILevel.CcPwrDynRm1 = 0; table 1430 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); table 1432 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); table 1433 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); table 1434 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); table 1435 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); table 1436 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); table 1437 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); table 1438 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); table 1439 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); table 1440 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); table 1444 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; table 1445 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; table 1448 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc; table 1451 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci *
VOLTAGE_SCALE); table 1453 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE); table 1457 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MinMvdd = table 1460 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MinMvdd = 0; table 1480 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.DllCntl = table 1482 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MclkPwrmgtCntl = table 1484 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MpllAdFuncCntl = table 1486 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MpllDqFuncCntl = table 1488 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MpllFuncCntl = table 1490 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MpllFuncCntl_1 = table 1492 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MpllFuncCntl_2 = table 1494 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MpllSs1 = table 1496 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.MpllSs2 = table 1499 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.EnabledForThrottle = 0; table 1500 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.EnabledForActivity = 0; table 1501 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.UpH = 0; table 1502 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.DownH = 100; table 1503 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.VoltageDownH = 0; table 1505 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity); table 1507 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.StutterEnable = 0; table 1508 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.StrobeEnable = 0; table 1509 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.EdcReadEnable = 0; table 1510 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.EdcWriteEnable = 0; table 1511 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryACPILevel.RttEnable = 0; table 1517 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 1525 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UvdLevelCount = (uint8_t)(uvd_table->count); table 1527 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (count = 0; count < table->UvdLevelCount; count++) { table 1528 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UvdLevel[count].VclkFrequency = table 1530 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UvdLevel[count].DclkFrequency = table 1532 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UvdLevel[count].MinVddc = table 1534 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UvdLevel[count].MinVddcPhases = 1; table 1537 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UvdLevel[count].VclkFrequency, &dividers); table 1541 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; table 1544 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UvdLevel[count].DclkFrequency, &dividers); table 1548 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; table 1549 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); table 1550 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); table 1551 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc); table 1558 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 1566 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VceLevelCount = (uint8_t)(vce_table->count); table 1567 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VceBootLevel = 0; table 1569 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (count = 0; count < table->VceLevelCount; count++) { table 1570 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VceLevel[count].Frequency = vce_table->entries[count].evclk; table 1571 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VceLevel[count].MinVoltage = table 1573 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VceLevel[count].MinPhases = 1; table 1576 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VceLevel[count].Frequency, &dividers); table 1581 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; table 1583 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); table 1584 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage); table 1590 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 1598 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpLevelCount = (uint8_t)(acp_table->count); table 1599 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpBootLevel = 0; table 1601 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (count = 0; count < table->AcpLevelCount; count++) { table 1602 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk; table 1603 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpLevel[count].MinVoltage = acp_table->entries[count].v; table 1604 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpLevel[count].MinPhases = 1; table 1607 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpLevel[count].Frequency, &dividers); table 1611 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; table 1613 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); table 1614 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage); table 1684 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 1690 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->GraphicsBootLevel = 0; table 1691 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryBootLevel = 0; table 1714 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->BootVddc = data->vbios_boot_state.vddc_bootup_value; table 1715 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->BootVddci = data->vbios_boot_state.vddci_bootup_value; table 1716 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; table 1882
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table) table 1887 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SVI2Enable = 1; table 1889 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SVI2Enable = 0; table 1909 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table) table 1915 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT); table 1919 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VRConfig |= config; table 1926 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT); table 1929 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT); table 1934 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VRConfig |= (config<<VRCONF_MVDD_SHIFT); table 1945 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 1953 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c ci_populate_smc_voltage_tables(hwmgr, table); table 1957 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 1962 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 1965 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 1968 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_ulv_state(hwmgr, &(table->Ulv)); table 1984 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_smc_link_level(hwmgr, table); table 1988 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_smc_acpi_level(hwmgr, table); table 1992 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_smc_vce_level(hwmgr, table); table 1996 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_smc_acp_level(hwmgr, table); table 2006 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_smc_uvd_level(hwmgr, table); table 2010 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UvdBootLevel = 0; table 2011 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VceBootLevel = 0; table 2012 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpBootLevel = 0; table 2013 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SamuBootLevel = 0; table 2015 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->GraphicsBootLevel = 0; table 2016 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryBootLevel = 0; table 2018 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_smc_boot_level(hwmgr, table); table 2028 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->UVDInterval = 1; table 2029 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VCEInterval = 1; table 2030 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ACPInterval = 1; table 2031 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SAMUInterval = 1; table 2032 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->GraphicsVoltageChangeEnable = 1; table 2033 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->GraphicsThermThrottleEnable = 1; table 2034 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->GraphicsInterval = 1; table 2035 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VoltageInterval = 1; table 2036 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c 
table->ThermalInterval = 1; table 2038 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->TemperatureLimitHigh = table 2041 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->TemperatureLimitLow = table 2045 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryVoltageChangeEnable = 1; table 2046 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryInterval = 1; table 2047 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VoltageResponseTime = 0; table 2048 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VddcVddciDelta = 4000; table 2049 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->PhaseResponseTime = 0; table 2050 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->MemoryThermThrottleEnable = 1; table 2056 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count; table 2057 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->PCIeGenInterval = 1; table 2059 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_populate_vr_config(hwmgr, table); table 2062 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c data->vr_config = table->VRConfig; table 2064 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c ci_populate_smc_svi2_config(hwmgr, table); table 2067 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]); table 2069 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->ThermGpio = 17; table 2070 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SclkStepSize = 0x4000; table 2072 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; table 2076 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; table 2081 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; table 2083 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); table 2084 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); table 2085 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); table 2086 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); table 2087 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); table 2088 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); table 2089 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); table 2090 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); table 2091 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); table 2092 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta); table 2093 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); table 2094 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); table 2096 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); table 2097 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE); table 2098 
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); table 2103 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c (uint8_t *)&(table->SystemFlags), table 2538 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) table 2543 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (i = 0; i < table->last; i++) { table 2544 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_address[i].s0 = table 2545 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) table 2546 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c ? address : table->mc_reg_address[i].s1; table 2551 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, table 2556 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), table 2558 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), table 2561 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (i = 0; i < table->last; i++) table 2562 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; table 2564 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c ni_table->last = table->last; table 2566 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (i = 0; i < table->num_entries; i++) { table 2568 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_table_entry[i].mclk_max; table 2569 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (j = 0; j < table->last; j++) { table 2571 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_table_entry[i].mc_data[j]; table 2575 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c ni_table->num_entries = table->num_entries; table 2581 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c struct ci_mc_reg_table *table) table 2587 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (i = 0, j = table->last; i < table->last; i++) { table 2591 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c switch (table->mc_reg_address[i].s1) { table 2595 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; table 2596 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; table 2597 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (k = 0; k < table->num_entries; k++) { table 2598 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 2600 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); table 2607 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; table 2608 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; table 2609 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (k = 0; k < table->num_entries; k++) { table 2610 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 2612 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 2615 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_table_entry[k].mc_data[j] |= 0x100; table 2622 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c 
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; table 2623 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; table 2624 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (k = 0; k < table->num_entries; k++) { table 2625 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 2626 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; table 2635 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; table 2636 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; table 2637 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (k = 0; k < table->num_entries; k++) { table 2638 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 2640 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 2651 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->last = j; table 2656 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c static int ci_set_valid_flag(struct ci_mc_reg_table *table) table 2660 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (i = 0; i < table->last; i++) { table 2661 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (j = 1; j < table->num_entries; j++) { table 2662 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (table->mc_reg_table_entry[j-1].mc_data[i] != table 2663 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->mc_reg_table_entry[j].mc_data[i]) { table 2664 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->validflag |= (1 << i); table 2677 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c pp_atomctrl_mc_reg_table *table; table 2681 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); table 2683 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (NULL == table) table 2708 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); table 2711 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c result = ci_copy_vbios_smc_reg_table(table, ni_table); table 2721 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c kfree(table); table 757 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c struct SMU73_Discrete_DpmTable *table) table 775 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->BapmVddcVidLoSidd[count] = table 777 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->BapmVddcVidHiSidd[count] = table 785 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c struct SMU73_Discrete_DpmTable *table) table 789 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c result = fiji_populate_cac_table(hwmgr, table); table 823 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c struct SMU73_Discrete_DpmTable *table) table 825 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c return fiji_populate_ulv_level(hwmgr, &table->Ulv); table 829 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c struct SMU73_Discrete_DpmTable *table) table 839 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->LinkLevel[i].PcieGenSpeed = table 841 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( table 843 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->LinkLevel[i].EnabledForActivity = 1; table 844 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c 
table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); table 845 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); table 846 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); table 1301 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c SMU73_Discrete_DpmTable *table) table 1314 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; table 1319 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.SclkFrequency = table 1323 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.SclkFrequency, table 1324 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd); table 1330 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.SclkFrequency = table 1332 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.MinVoltage = table 1338 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.SclkFrequency, &dividers); table 1343 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; table 1344 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; table 1345 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.DeepSleepDivId = 0; table 1354 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; table 1355 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; table 1356 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; table 1357 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; table 1358 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; table 1359 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; table 1360 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.CcPwrDynRm = 0; table 1361 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ACPILevel.CcPwrDynRm1 = 0; table 1363 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); table 1364 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); table 1365 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); table 1366 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); table 1367 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); table 1368 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); table 1369 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); table 1370 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); table 1371 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); table 1372 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); table 1373 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); table 1377 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.MclkFrequency = table 1381 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.MclkFrequency, table 1382 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd); table 1387 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.MclkFrequency = table 1389 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.MinVoltage = table 1404 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.MinMvdd = table 1407 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.EnabledForThrottle = 0; table 1408 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.EnabledForActivity = 0; table 1409 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.UpHyst = 0; table 1410 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.DownHyst = 100; table 1411 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.VoltageDownHyst = 0; table 1412 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.ActivityLevel = table 1415 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryACPILevel.StutterEnable = false; table 1416 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); table 1417 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); table 1423 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c SMU73_Discrete_DpmTable *table) table 1433 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceLevelCount = (uint8_t)(mm_table->count); table 1434 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceBootLevel = 0; table 1436 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c for (count = 0; count < table->VceLevelCount; count++) { table 1437 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceLevel[count].Frequency = mm_table->entries[count].eclk; table 1438 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceLevel[count].MinVoltage = 0; table 1439 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceLevel[count].MinVoltage |= table 1441 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceLevel[count].MinVoltage |= table 1444 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; table 1448 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceLevel[count].Frequency, &dividers); table 1453 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; table 1455 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); table 1456 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); table 1462 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c SMU73_Discrete_DpmTable *table) table 1472 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevelCount =
(uint8_t)(mm_table->count); table 1473 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpBootLevel = 0; table 1475 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c for (count = 0; count < table->AcpLevelCount; count++) { table 1476 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevel[count].Frequency = mm_table->entries[count].aclk; table 1477 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc * table 1479 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - table 1481 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT; table 1485 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevel[count].Frequency, &dividers); table 1489 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; table 1491 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); table 1492 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage); table 1559 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c struct SMU73_Discrete_DpmTable *table) table 1569 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevelCount = (uint8_t)(mm_table->count); table 1570 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdBootLevel = 0; table 1572 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c for (count = 0; count < table->UvdLevelCount; count++) { table 1573 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].MinVoltage = 0; table 1574 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; table 1575 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; table 1576 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * table 1578 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - table 1580 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; table 1584 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].VclkFrequency, &dividers); table 1588 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; table 1591 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].DclkFrequency, &dividers); table 1595 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; table 1597 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); table 1598 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); table 1599 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); table 1606 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c struct SMU73_Discrete_DpmTable *table) table 1611 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->GraphicsBootLevel = 0; table 1612 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryBootLevel = 0; table
1617 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (uint32_t *)&(table->GraphicsBootLevel)); table 1621 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (uint32_t *)&(table->MemoryBootLevel)); table 1623 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->BootVddc = data->vbios_boot_state.vddc_bootup_value * table 1625 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->BootVddci = data->vbios_boot_state.vddci_bootup_value * table 1627 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * table 1630 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); table 1631 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); table 1632 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); table 1824 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c struct SMU73_Discrete_DpmTable *table) table 1830 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); table 1835 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRConfig |= config; table 1844 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); table 1847 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); table 1850 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); table 1855 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRConfig |= (config << VRCONF_MVDD_SHIFT); table 1858 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRConfig |= (config << VRCONF_MVDD_SHIFT); table 1861 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRConfig |= (config << VRCONF_MVDD_SHIFT); table 1930 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 1937 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c fiji_populate_smc_voltage_tables(hwmgr, table); table 1939 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->SystemFlags = 0; table 1943 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 1947 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 1950 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 1953 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c result = fiji_populate_ulv_state(hwmgr, table); table 1960 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c result = fiji_populate_smc_link_level(hwmgr, table); table 1972 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c result = fiji_populate_smc_acpi_level(hwmgr, table); table 1976 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c result = fiji_populate_smc_vce_level(hwmgr, table); table 1980 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c result = fiji_populate_smc_acp_level(hwmgr, table); table 1992 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c result = fiji_populate_smc_uvd_level(hwmgr, table); table 1996 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c result = fiji_populate_smc_boot_level(hwmgr, table); table 2016 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->GraphicsVoltageChangeEnable = 1; table 2017 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->GraphicsThermThrottleEnable = 1; table 2018 
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->GraphicsInterval = 1; table 2019 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VoltageInterval = 1; table 2020 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ThermalInterval = 1; table 2021 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->TemperatureLimitHigh = table 2024 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->TemperatureLimitLow = table 2027 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryVoltageChangeEnable = 1; table 2028 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryInterval = 1; table 2029 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VoltageResponseTime = 0; table 2030 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->PhaseResponseTime = 0; table 2031 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->MemoryThermThrottleEnable = 1; table 2032 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ table 2033 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->PCIeGenInterval = 1; table 2034 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRConfig = 0; table 2036 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c result = fiji_populate_vr_config(hwmgr, table); table 2039 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c data->vr_config = table->VRConfig; table 2040 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ThermGpio = 17; table 2041 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->SclkStepSize = 0x4000; table 2044 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; table 2048 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; table 2055 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; table 2059 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; table 2070 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; table 2077 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & table 2079 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; table 2086 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; table 2090 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ThermOutGpio = 17; table 2091 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ThermOutPolarity = 1; table 2092 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; table 2096 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); table 2098 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); table 2099 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); table 2100 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); table 2101 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); table 2102 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); table 2103 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c 
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); table 2104 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); table 2105 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); table 2106 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); table 2112 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (uint8_t *)&(table->SystemFlags), table 618 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table) table 624 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VddcLevelCount = data->vddc_voltage_table.count; table 625 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (count = 0; count < table->VddcLevelCount; count++) { table 628 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c &(table->VddcLevel[count])); table 633 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; table 635 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VddcLevel[count].Smio = 0; table 638 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); table 644 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table) table 650 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VddciLevelCount = data->vddci_voltage_table.count; table 652 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (count = 0; count < table->VddciLevelCount; count++) { table 655 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c &(table->VddciLevel[count])); table 658 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; table 660 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VddciLevel[count].Smio |= 0; table 663 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); table 669 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table) table 675 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MvddLevelCount = data->mvdd_voltage_table.count; table 677 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (count = 0; count < table->VddciLevelCount; count++) { table 680 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c &table->MvddLevel[count]); table 683 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; table 685 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MvddLevel[count].Smio |= 0; table 688 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); table 695 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table) table 699 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_vddc_table(hwmgr, table); table 703 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_vdd_ci_table(hwmgr, table); table 707 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_mvdd_table(hwmgr, table); table 764 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table) table 773 
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->LinkLevel[i].PcieGenSpeed = table 775 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->LinkLevel[i].PcieLaneCount = table 777 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->LinkLevel[i].EnabledForActivity = table 779 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->LinkLevel[i].SPC = table 781 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->LinkLevel[i].DownThreshold = table 783 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->LinkLevel[i].UpThreshold = table 1423 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table) table 1438 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; table 1441 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); table 1443 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE); table 1445 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1; table 1447 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); table 1451 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.SclkFrequency, &dividers); table 1457 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; table 1458 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; table 1459 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.DeepSleepDivId = 0; table 1468 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; table 1469 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; table 1470 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; table 1471 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; table 1472 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; table 1473 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; table 1474 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.CcPwrDynRm = 0; table 1475 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ACPILevel.CcPwrDynRm1 = 0; table 1479 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); table 1481 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); table 1482 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); table 1483 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); table 1484 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); table 1485 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); table 1486 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); table 1487 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); table 1488 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); table 1489 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); table 1492 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; table 1493 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; table 1496 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc; table 1499 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE); table 1501 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE); table 1505 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MinMvdd = table 1508 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MinMvdd = 0; table 1528 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.DllCntl = table 1530 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MclkPwrmgtCntl = table 1532 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MpllAdFuncCntl = table 1534 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MpllDqFuncCntl = table 1536 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MpllFuncCntl = table 1538 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MpllFuncCntl_1 = table 1540 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MpllFuncCntl_2 = table 1542 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MpllSs1 = table 1544 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.MpllSs2 = table 1547 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.EnabledForThrottle = 0; table 1548 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.EnabledForActivity = 0; table 1549 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.UpHyst = 0; table 1550 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.DownHyst = 100; table 1551 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.VoltageDownHyst = 0; table 1553 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity); table 1555 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.StutterEnable = 0; table 1556 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.StrobeEnable = 0; table 1557 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.EdcReadEnable = 0; table 1558 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.EdcWriteEnable = 0; table 1559 
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryACPILevel.RttEnable = 0; table 1565 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table) table 1571 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table) table 1577 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table) table 1648 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table) table 1653 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->GraphicsBootLevel = 0; table 1654 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryBootLevel = 0; table 1677 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->BootVddc = data->vbios_boot_state.vddc_bootup_value; table 1679 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->BootVddci = table->BootVddc; table 1681 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->BootVddci = data->vbios_boot_state.vddci_bootup_value; table 1683 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; table 1934 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 1941 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c iceland_populate_smc_voltage_tables(hwmgr, table); table 1946 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 1951 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 1954 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 1966 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_link_level(hwmgr, table); table 1978 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_acpi_level(hwmgr, table); table 1982 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_vce_level(hwmgr, table); table 1986 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_acp_level(hwmgr, table); table 1996 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_uvd_level(hwmgr, table); table 2000 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->GraphicsBootLevel = 0; table 2001 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryBootLevel = 0; table 2003 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_boot_level(hwmgr, table); table 2013 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->GraphicsVoltageChangeEnable = 1; table 2014 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->GraphicsThermThrottleEnable = 1; table 2015 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->GraphicsInterval = 1; table 2016 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VoltageInterval = 1; table 2017 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ThermalInterval = 1; table 2019 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->TemperatureLimitHigh = table 2022 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->TemperatureLimitLow = table 2026 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryVoltageChangeEnable = 1; table 2027 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryInterval = 1; table 2028 
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VoltageResponseTime = 0; table 2029 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->PhaseResponseTime = 0; table 2030 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MemoryThermThrottleEnable = 1; table 2031 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->PCIeBootLinkLevel = 0; table 2032 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->PCIeGenInterval = 1; table 2034 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_populate_smc_svi2_config(hwmgr, table); table 2038 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->ThermGpio = 17; table 2039 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->SclkStepSize = 0x4000; table 2041 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); table 2042 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); table 2043 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); table 2044 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); table 2045 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); table 2046 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); table 2047 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); table 2048 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); table 2049 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); table 2050 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); table 2052 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); table 2053 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE); table 2054 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); table 2059 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c (uint8_t *)&(table->SystemFlags), table 2467 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table) table 2472 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (i = 0; i < table->last; i++) { table 2473 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_address[i].s0 = table 2474 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) table 2475 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c ? 
address : table->mc_reg_address[i].s1; table 2480 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, table 2485 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), table 2487 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), table 2490 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (i = 0; i < table->last; i++) { table 2491 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; table 2493 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c ni_table->last = table->last; table 2495 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (i = 0; i < table->num_entries; i++) { table 2497 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_table_entry[i].mclk_max; table 2498 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (j = 0; j < table->last; j++) { table 2500 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_table_entry[i].mc_data[j]; table 2504 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c ni_table->num_entries = table->num_entries; table 2510 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c struct iceland_mc_reg_table *table) table 2516 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (i = 0, j = table->last; i < table->last; i++) { table 2520 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c switch (table->mc_reg_address[i].s1) { table 2524 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; table 2525 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; table 2526 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (k = 0; k < table->num_entries; k++) { table 2527 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 2529 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); table 2536 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; table 2537 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; table 2538 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (k = 0; k < table->num_entries; k++) { table 2539 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 2541 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 2544 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_table_entry[k].mc_data[j] |= 0x100; table 2552 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; table 2553 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; table 2554 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (k = 0; k < table->num_entries; k++) { table 2555 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 2556 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; table 2565 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c 
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; table 2566 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; table 2567 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (k = 0; k < table->num_entries; k++) { table 2568 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 2570 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 2581 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->last = j; table 2586 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c static int iceland_set_valid_flag(struct iceland_mc_reg_table *table) table 2589 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (i = 0; i < table->last; i++) { table 2590 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (j = 1; j < table->num_entries; j++) { table 2591 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (table->mc_reg_table_entry[j-1].mc_data[i] != table 2592 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->mc_reg_table_entry[j].mc_data[i]) { table 2593 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->validflag |= (1<<i); table 2606 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c pp_atomctrl_mc_reg_table *table; table 2610 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); table 2612 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (NULL == table) table 2637 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); table 2640 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c result = iceland_copy_vbios_smc_reg_table(table, ni_table); table 2650 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c kfree(table); table 429 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 439 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); table 440 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); table 446 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( table 448 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( table 450 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->FanGainEdge = PP_HOST_TO_SMC_US( table 452 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->FanGainHotspot = PP_HOST_TO_SMC_US( table 461 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); table 462 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); table 647 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c SMU74_Discrete_DpmTable *table) table 657 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SmioTable2.Pattern[level].Voltage = table 660 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SmioTable2.Pattern[level].Smio = table 662 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->Smio[level] |= table 665 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SmioMask2 = data->mvdd_voltage_table.mask_low; table 667 
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); table 674 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c struct SMU74_Discrete_DpmTable *table) table 685 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SmioTable1.Pattern[level].Voltage = table 687 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SmioTable1.Pattern[level].Smio = (uint8_t) level; table 689 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; table 693 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SmioMask1 = data->vddci_voltage_table.mask_low; table 699 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c struct SMU74_Discrete_DpmTable *table) table 716 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low); table 717 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid); table 718 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high); table 725 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c struct SMU74_Discrete_DpmTable *table) table 727 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c polaris10_populate_smc_vddci_table(hwmgr, table); table 728 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c polaris10_populate_smc_mvdd_table(hwmgr, table); table 729 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c polaris10_populate_cac_table(hwmgr, table); table 761 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c struct SMU74_Discrete_DpmTable *table) table 763 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c return polaris10_populate_ulv_level(hwmgr, &table->Ulv); table 767 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c struct SMU74_Discrete_DpmTable *table) table 777 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->LinkLevel[i].PcieGenSpeed = table 779 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( table 781 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->LinkLevel[i].EnabledForActivity = 1; table 782 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); table 783 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); table 784 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); table 799 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c SMU74_Discrete_DpmTable *table) table 810 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting; table 811 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv; table 812 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc; table 814 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper; table 815 
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower; table 817 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); table 818 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); table 819 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); table 828 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting; table 829 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv; table 830 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc; table 832 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper; table 833 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower; table 835 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); table 836 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); table 837 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); table 845 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 881 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); table 882 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; table 889 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); table 896 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); table 897 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; table 1200 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c SMU74_Discrete_DpmTable *table) table 1210 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; table 1218 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c &table->ACPILevel.MinVoltage, &mvdd); table 1224 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting)); table 1227 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ACPILevel.DeepSleepDivId = 0; table 1228 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ACPILevel.CcPwrDynRm = 0; table 1229 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ACPILevel.CcPwrDynRm1 = 0; table 1231 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c 
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); table 1232 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); table 1233 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); table 1234 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); table 1236 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency); table 1237 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int); table 1238 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac); table 1239 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int); table 1240 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate); table 1241 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate); table 1242 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate); table 1243 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int); table 1244 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); table 1245 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); table 1249 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; table 1252 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.MclkFrequency, table 1253 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c &table->MemoryACPILevel.MinVoltage, &mvdd); table 1266 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); table 1268 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.MinMvdd = 0; table 1270 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.StutterEnable = false; table 1272 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.EnabledForThrottle = 0; table 1273 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.EnabledForActivity = 0; table 1274 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.UpHyst = 0; table 1275 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.DownHyst = 100; table 1276 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.VoltageDownHyst = 0; table 1277 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryACPILevel.ActivityLevel = table 1280 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); table 1281 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); table 1287 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c SMU74_Discrete_DpmTable *table) 
table 1299 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceLevelCount = (uint8_t)(mm_table->count); table 1300 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceBootLevel = 0; table 1302 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c for (count = 0; count < table->VceLevelCount; count++) { table 1303 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceLevel[count].Frequency = mm_table->entries[count].eclk; table 1304 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceLevel[count].MinVoltage = 0; table 1305 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceLevel[count].MinVoltage |= table 1317 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceLevel[count].MinVoltage |= table 1319 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; table 1323 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceLevel[count].Frequency, &dividers); table 1328 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; table 1330 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); table 1331 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); table 1393 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c struct SMU74_Discrete_DpmTable *table) table 1405 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevelCount = (uint8_t)(mm_table->count); table 1406 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdBootLevel = 0; table 1408 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c for (count = 0; count < table->UvdLevelCount; count++) { table 1409 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].MinVoltage = 0; table 1410 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; table 1411 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; table 1412 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * table 1423 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; table 1424 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; table 1428 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].VclkFrequency, &dividers); table 1432 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; table 1435 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].DclkFrequency, &dividers); table 1439 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; table 1441 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); table 1442 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); table 1443 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); table 1450
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c struct SMU74_Discrete_DpmTable *table) table 1455 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->GraphicsBootLevel = 0; table 1456 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryBootLevel = 0; table 1461 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (uint32_t *)&(table->GraphicsBootLevel)); table 1465 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (uint32_t *)&(table->MemoryBootLevel)); table 1467 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BootVddc = data->vbios_boot_state.vddc_bootup_value * table 1469 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BootVddci = data->vbios_boot_state.vddci_bootup_value * table 1471 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * table 1474 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); table 1475 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); table 1476 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); table 1596 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c struct SMU74_Discrete_DpmTable *table) table 1603 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); table 1608 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRConfig |= config; table 1617 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); table 1620 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); table 1623 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); table 1628 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRConfig |= (config << VRCONF_MVDD_SHIFT); table 1633 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRConfig |= (config << VRCONF_MVDD_SHIFT); table 1646 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 1718 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); table 1719 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); table 1720 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); table 1721 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); table 1722 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); table 1723 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); table 1724 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); table 1725 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); 
table 1726 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); table 1727 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24; table 1728 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12; table 1729 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); table 1730 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); table 1731 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); table 1732 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24; table 1733 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12; table 1734 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); table 1828 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 1836 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c polaris10_populate_smc_voltage_tables(hwmgr, table); table 1838 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SystemFlags = 0; table 1841 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 1845 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 1848 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 1851 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c result = polaris10_populate_ulv_state(hwmgr, table); table 1858 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c result = polaris10_populate_smc_link_level(hwmgr, table); table 1870 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c result = polaris10_populate_smc_acpi_level(hwmgr, table); table 1874 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c result = polaris10_populate_smc_vce_level(hwmgr, table); table 1886 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c result = polaris10_populate_smc_uvd_level(hwmgr, table); table 1890 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c result = polaris10_populate_smc_boot_level(hwmgr, table); table 1913 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->CurrSclkPllRange = 0xff; table 1914 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->GraphicsVoltageChangeEnable = 1; table 1915 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->GraphicsThermThrottleEnable = 1; table 1916 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->GraphicsInterval = 1; table 1917 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VoltageInterval = 1; table 1918 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ThermalInterval = 1; table 1919 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->TemperatureLimitHigh = table 1922 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->TemperatureLimitLow = table 1925 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c 
table->MemoryVoltageChangeEnable = 1; table 1926 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryInterval = 1; table 1927 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VoltageResponseTime = 0; table 1928 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->PhaseResponseTime = 0; table 1929 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->MemoryThermThrottleEnable = 1; table 1930 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->PCIeBootLinkLevel = 0; table 1931 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->PCIeGenInterval = 1; table 1932 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRConfig = 0; table 1934 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c result = polaris10_populate_vr_config(hwmgr, table); table 1937 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c hw_data->vr_config = table->VRConfig; table 1938 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ThermGpio = 17; table 1939 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->SclkStepSize = 0x4000; table 1942 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; table 1944 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; table 1951 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; table 1955 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; table 1966 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; table 1973 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) table 1975 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; table 1980 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; table 1982 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ThermOutGpio = 17; table 1983 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ThermOutPolarity = 1; table 1984 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; table 1993 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider)); table 1995 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider)); table 1999 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); table 2001 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); table 2002 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); table 2003 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); table 2004 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); table 2005 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); table 2006 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); table 2007 
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); table 2008 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); table 2009 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); table 2010 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); table 2016 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (uint8_t *)&(table->SystemFlags), table 117 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c uint8_t *table, int16_t table_id) table 142 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, table 149 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c uint8_t *table, int16_t table_id) table 161 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c memcpy(priv->smu_tables.entry[table_id].table, table, table 202 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c &priv->smu_tables.entry[SMU10_WMTABLE].table); table 205 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c &priv->smu_tables.entry[SMU10_CLOCKTABLE].table); table 250 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c &priv->smu_tables.entry[SMU10_WMTABLE].table); table 266 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c &priv->smu_tables.entry[SMU10_CLOCKTABLE].table); table 280 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c &priv->smu_tables.entry[SMU10_WMTABLE].table); table 286 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw) table 291 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c ret = smu10_copy_table_from_smc(hwmgr, table, table_id); table 293 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c ret = smu10_copy_table_to_smc(hwmgr, table, table_id); table 37 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h void *table; table 602 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) table 613 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr; table 122 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table) table 126 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c table); table 213 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw) table 216 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c return hwmgr->smumgr_funcs->smc_table_manager(hwmgr, table, table_id, rw); table 303 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 309 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VddcLevelCount = data->vddc_voltage_table.count; table 310 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (count = 0; count < table->VddcLevelCount; count++) { table 311 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VddcTable[count] = table 314 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); table 320 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 326 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VddGfxLevelCount = data->vddgfx_voltage_table.count; table 
328 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VddGfxTable[count] = table 331 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount); table 337 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 342 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VddciLevelCount = data->vddci_voltage_table.count; table 343 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (count = 0; count < table->VddciLevelCount; count++) { table 345 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VddciTable[count] = table 348 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SmioTable1.Pattern[count].Voltage = table 351 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SmioTable1.Pattern[count].Smio = table 353 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->Smio[count] |= table 355 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VddciTable[count] = table 360 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SmioMask1 = data->vddci_voltage_table.mask_low; table 361 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); table 367 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 373 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MvddLevelCount = data->mvdd_voltage_table.count; table 374 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (count = 0; count < table->MvddLevelCount; count++) { table 375 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SmioTable2.Pattern[count].Voltage = table 378 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SmioTable2.Pattern[count].Smio = table 380 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->Smio[count] |= table 383 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SmioMask2 = data->mvdd_voltage_table.mask_low; table 385 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); table 392 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 407 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount); table 408 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount); table 414 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BapmVddcVidLoSidd[count] = table 416 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BapmVddcVidHiSidd[count] = table 418 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BapmVddcVidHiSidd2[count] = table 427 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BapmVddGfxVidHiSidd2[count] = table 434 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BapmVddGfxVidLoSidd[count] = table 436 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BapmVddGfxVidHiSidd[count] = table 438 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BapmVddGfxVidHiSidd2[count] = table 447 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 451 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_smc_vddc_table(hwmgr, table); table 456 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_smc_vdd_ci_table(hwmgr, table); table 461 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = 
tonga_populate_smc_vdd_gfx_table(hwmgr, table); table 466 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_smc_mvdd_table(hwmgr, table); table 471 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_cac_tables(hwmgr, table); table 502 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c struct SMU72_Discrete_DpmTable *table) table 504 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c return tonga_populate_ulv_level(hwmgr, &table->Ulv); table 507 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table) table 516 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->LinkLevel[i].PcieGenSpeed = table 518 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->LinkLevel[i].PcieLaneCount = table 520 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->LinkLevel[i].EnabledForActivity = table 522 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->LinkLevel[i].SPC = table 524 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->LinkLevel[i].DownThreshold = table 526 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->LinkLevel[i].UpThreshold = table 1174 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 1189 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; table 1191 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.MinVoltage = table 1195 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); table 1199 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.SclkFrequency, &dividers); table 1206 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; table 1207 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; table 1208 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.DeepSleepDivId = 0; table 1217 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; table 1218 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; table 1219 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; table 1220 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; table 1221 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; table 1222 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; table 1223 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.CcPwrDynRm = 0; table 1224 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ACPILevel.CcPwrDynRm1 = 0; table 1228 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); table 1230 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); table 1231 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); table 1232 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c 
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); table 1233 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); table 1234 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); table 1235 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); table 1236 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); table 1237 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); table 1238 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); table 1241 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MinVoltage = table 1247 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MinMvdd = table 1250 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MinMvdd = 0; table 1270 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.DllCntl = table 1272 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MclkPwrmgtCntl = table 1274 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MpllAdFuncCntl = table 1276 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MpllDqFuncCntl = table 1278 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MpllFuncCntl = table 1280 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MpllFuncCntl_1 = table 1282 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MpllFuncCntl_2 = table 1284 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MpllSs1 = table 1286 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.MpllSs2 = table 1289 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.EnabledForThrottle = 0; table 1290 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.EnabledForActivity = 0; table 1291 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.UpHyst = 0; table 1292 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.DownHyst = 100; table 1293 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.VoltageDownHyst = 0; table 1295 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.ActivityLevel = table 1298 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.StutterEnable = 0; table 1299 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.StrobeEnable = 0; table 1300 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.EdcReadEnable = 0; table 1301 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.EdcWriteEnable = 0; table 1302 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryACPILevel.RttEnable = 0; table 1308 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 1320 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevelCount = (uint8_t) (mm_table->count); table 1321 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdBootLevel = 0; table 1323 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (count = 0; count < 
table->UvdLevelCount; count++) { table 1324 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; table 1325 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; table 1326 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].MinVoltage.Vddc = table 1329 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].MinVoltage.VddGfx = table 1333 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].MinVoltage.Vddci = table 1336 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].MinVoltage.Phases = 1; table 1341 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].VclkFrequency, table 1348 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; table 1351 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].DclkFrequency, &dividers); table 1356 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].DclkDivider = table 1359 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); table 1360 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); table 1368 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 1380 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VceLevelCount = (uint8_t) (mm_table->count); table 1381 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VceBootLevel = 0; table 1383 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (count = 0; count < table->VceLevelCount; count++) { table 1384 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VceLevel[count].Frequency = table 1386 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VceLevel[count].MinVoltage.Vddc = table 1389 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VceLevel[count].MinVoltage.VddGfx = table 1393 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VceLevel[count].MinVoltage.Vddci = table 1396 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VceLevel[count].MinVoltage.Phases = 1; table 1400 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VceLevel[count].Frequency, &dividers); table 1405 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; table 1407 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); table 1414 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 1425 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcpLevelCount = (uint8_t) (mm_table->count); table 1426 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcpBootLevel = 0; table 1428 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (count = 0; count < table->AcpLevelCount; count++) { table 1429 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcpLevel[count].Frequency = table 1431 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcpLevel[count].MinVoltage.Vddc = table 1434 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcpLevel[count].MinVoltage.VddGfx = table 1438 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c 
table->AcpLevel[count].MinVoltage.Vddci = table 1441 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcpLevel[count].MinVoltage.Phases = 1; table 1445 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcpLevel[count].Frequency, &dividers); table 1449 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; table 1451 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); table 1523 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 1529 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->GraphicsBootLevel = 0; table 1530 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryBootLevel = 0; table 1557 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BootVoltage.Vddc = table 1560 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BootVoltage.VddGfx = table 1563 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BootVoltage.Vddci = table 1566 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; table 1568 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); table 1747 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table) table 1755 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT); table 1759 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VRConfig |= config; table 1767 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT); table 1772 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VRConfig |= config; table 1782 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT); table 1785 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT); table 1791 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VRConfig |= (config<<VRCONF_MVDD_SHIFT); table 2226 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 2239 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c tonga_populate_smc_voltage_tables(hwmgr, table); table 2243 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 2248 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 2251 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 2256 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SystemFlags |= 0x40; table 2259 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_ulv_state(hwmgr, table); table 2268 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_smc_link_level(hwmgr, table); table 2280 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_smc_acpi_level(hwmgr, table); table 2284 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_smc_vce_level(hwmgr, table); table 2288 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_smc_acp_level(hwmgr, table); table 2301 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_smc_uvd_level(hwmgr, table); table 2305 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result 
= tonga_populate_smc_boot_level(hwmgr, table); table 2320 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->GraphicsVoltageChangeEnable = 1; table 2321 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->GraphicsThermThrottleEnable = 1; table 2322 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->GraphicsInterval = 1; table 2323 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VoltageInterval = 1; table 2324 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ThermalInterval = 1; table 2325 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->TemperatureLimitHigh = table 2328 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->TemperatureLimitLow = table 2331 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryVoltageChangeEnable = 1; table 2332 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryInterval = 1; table 2333 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VoltageResponseTime = 0; table 2334 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->PhaseResponseTime = 0; table 2335 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->MemoryThermThrottleEnable = 1; table 2350 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); table 2352 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->PCIeGenInterval = 1; table 2354 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_populate_vr_config(hwmgr, table); table 2357 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c data->vr_config = table->VRConfig; table 2358 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ThermGpio = 17; table 2359 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->SclkStepSize = 0x4000; table 2363 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; table 2367 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; table 2374 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; table 2378 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; table 2398 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; table 2400 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ThermOutPolarity = table 2404 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; table 2411 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; table 2417 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ThermOutGpio = 17; table 2418 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ThermOutPolarity = 1; table 2419 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; table 2423 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); table 2424 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); table 2425 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); table 2426 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); table 2427 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c 
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); table 2428 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); table 2429 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); table 2430 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); table 2431 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); table 2432 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); table 2438 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (uint8_t *)&(table->SystemFlags), table 2928 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table) table 2933 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (i = 0; i < table->last; i++) { table 2934 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[i].s0 = table 2935 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, table 2938 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[i].s1; table 2943 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, table 2948 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), table 2950 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), table 2953 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (i = 0; i < table->last; i++) table 2954 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; table 2956 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c ni_table->last = table->last; table 2958 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (i = 0; i < table->num_entries; i++) { table 2960 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_table_entry[i].mclk_max; table 2961 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (j = 0; j < table->last; j++) { table 2963 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_table_entry[i].mc_data[j]; table 2967 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c ni_table->num_entries = table->num_entries; table 2973 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c struct tonga_mc_reg_table *table) table 2979 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (i = 0, j = table->last; i < table->last; i++) { table 2983 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c switch (table->mc_reg_address[i].s1) { table 2988 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; table 2989 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; table 2990 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (k = 0; k < table->num_entries; k++) { table 2991 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 2993 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); table 3000 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; table 3001 
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; table 3002 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (k = 0; k < table->num_entries; k++) { table 3003 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 3005 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 3008 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_table_entry[k].mc_data[j] |= 0x100; table 3015 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; table 3016 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; table 3017 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (k = 0; k < table->num_entries; k++) table 3018 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 3019 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; table 3027 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; table 3028 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; table 3029 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (k = 0; k < table->num_entries; k++) { table 3030 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_table_entry[k].mc_data[j] = table 3032 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 3043 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->last = j; table 3048 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c static int tonga_set_valid_flag(struct tonga_mc_reg_table *table) table 3052 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (i = 0; i < table->last; i++) { table 3053 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c for (j = 1; j < table->num_entries; j++) { table 3054 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c if (table->mc_reg_table_entry[j-1].mc_data[i] != table 3055 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->mc_reg_table_entry[j].mc_data[i]) { table 3056 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->validflag |= (1<<i); table 3069 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c pp_atomctrl_mc_reg_table *table; table 3073 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); table 3075 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c if (table == NULL) table 3120 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); table 3123 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c result = tonga_copy_vbios_smc_reg_table(table, ni_table); table 3133 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c kfree(table); table 39 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c uint8_t *table, int16_t table_id) table 63 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c memcpy(table, priv->smu_tables.entry[table_id].table, table 70 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c uint8_t *table, int16_t table_id) table 81 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c memcpy(priv->smu_tables.entry[table_id].table, table, table 203 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c 
&priv->smu_tables.entry[PPTABLE].table); table 218 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[WMTABLE].table); table 234 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[AVFSTABLE].table); table 251 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[TOOLSTABLE].table); table 266 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[AVFSFUSETABLE].table); table 278 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c if (priv->smu_tables.entry[TOOLSTABLE].table) table 281 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[TOOLSTABLE].table); table 285 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[AVFSTABLE].table); table 289 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[WMTABLE].table); table 293 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[PPTABLE].table); table 307 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[PPTABLE].table); table 310 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[WMTABLE].table); table 313 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[AVFSTABLE].table); table 314 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c if (priv->smu_tables.entry[TOOLSTABLE].table) table 317 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[TOOLSTABLE].table); table 320 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c &priv->smu_tables.entry[AVFSFUSETABLE].table); table 341 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, table 347 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c ret = vega10_copy_table_from_smc(hwmgr, table, table_id); table 349 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c ret = vega10_copy_table_to_smc(hwmgr, table, table_id); table 33 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h void *table; table 41 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c uint8_t *table, int16_t table_id) table 71 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c memcpy(table, priv->smu_tables.entry[table_id].table, table 83 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c uint8_t *table, int16_t table_id) table 95 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c memcpy(priv->smu_tables.entry[table_id].table, table, table 227 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_PPTABLE].table); table 241 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_WATERMARKS].table); table 257 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); table 272 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].table); table 287 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_OVERDRIVE].table); table 301 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_SMU_METRICS].table); table 313 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_OVERDRIVE].table); table 317 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].table); table 319 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].table) table 
322 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); table 326 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_WATERMARKS].table); table 330 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_PPTABLE].table); table 345 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_PPTABLE].table); table 348 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_WATERMARKS].table); table 349 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].table) table 352 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); table 355 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].table); table 358 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_OVERDRIVE].table); table 361 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c &priv->smu_tables.entry[TABLE_SMU_METRICS].table); table 379 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c static int vega12_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, table 385 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c ret = vega12_copy_table_from_smc(hwmgr, table, table_id); table 387 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c ret = vega12_copy_table_to_smc(hwmgr, table, table_id); table 34 drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h void *table; table 162 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c uint8_t *table, int16_t table_id) table 194 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c memcpy(table, priv->smu_tables.entry[table_id].table, table 206 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c uint8_t *table, int16_t table_id) table 219 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c memcpy(priv->smu_tables.entry[table_id].table, table, table 241 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c uint8_t *table, uint16_t workload_type) table 247 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table, table 269 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c uint8_t *table, uint16_t workload_type) table 295 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table 425 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_PPTABLE].table); table 439 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_WATERMARKS].table); table 453 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); table 467 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_OVERDRIVE].table); table 481 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_SMU_METRICS].table); table 495 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table); table 507 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_SMU_METRICS].table); table 511 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_OVERDRIVE].table); table 515 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); table 519 
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_WATERMARKS].table); table 523 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_PPTABLE].table); table 538 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_PPTABLE].table); table 541 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_WATERMARKS].table); table 544 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); table 547 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_OVERDRIVE].table); table 550 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_SMU_METRICS].table); table 553 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table); table 589 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, table 595 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c ret = vega20_copy_table_from_smc(hwmgr, table, table_id); table 597 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c ret = vega20_copy_table_to_smc(hwmgr, table, table_id); table 33 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h void *table; table 55 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h uint8_t *table, uint16_t workload_type); table 57 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h uint8_t *table, uint16_t workload_type); table 448 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c SMU75_Discrete_DpmTable *table) table 458 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US( table 461 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SmioTable2.Pattern[level].Smio = table 463 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->Smio[level] |= table 466 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SmioMask2 = data->mvdd_voltage_table.mask_low; table 468 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); table 475 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c struct SMU75_Discrete_DpmTable *table) table 486 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US( table 488 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SmioTable1.Pattern[level].Smio = (uint8_t) level; table 490 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; table 494 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SmioMask1 = data->vddci_voltage_table.mask_low; table 500 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c struct SMU75_Discrete_DpmTable *table) table 517 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BapmVddcVidLoSidd[count] = table 519 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BapmVddcVidHiSidd[count] = table 521 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BapmVddcVidHiSidd2[count] = table 529 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c struct SMU75_Discrete_DpmTable *table) table 531 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c vegam_populate_smc_vddci_table(hwmgr, table); table 532 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c vegam_populate_smc_mvdd_table(hwmgr, table); table 533 
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c vegam_populate_cac_table(hwmgr, table); table 562 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c struct SMU75_Discrete_DpmTable *table) table 564 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c return vegam_populate_ulv_level(hwmgr, &table->Ulv); table 568 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c struct SMU75_Discrete_DpmTable *table) table 579 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->LinkLevel[i].PcieGenSpeed = table 581 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( table 583 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->LinkLevel[i].EnabledForActivity = 1; table 584 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); table 585 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); table 586 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); table 667 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c SMU75_Discrete_DpmTable *table) table 678 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].vco_setting = table 680 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].postdiv = table 682 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].fcw_pcc = table 685 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].fcw_trans_upper = table 687 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].fcw_trans_lower = table 690 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); table 691 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); table 692 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); table 703 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting; table 704 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv; table 705 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc; table 707 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper; table 708 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower; table 710 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); table 711 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); table 712 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); table 720 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c const SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 757 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c ((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / table 759 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c temp = clock << 
table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; table 767 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c ((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / table 776 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c ((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / table 778 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; table 1108 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c SMU75_Discrete_DpmTable *table) table 1119 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; table 1127 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c &table->ACPILevel.MinVoltage, &mvdd); table 1134 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c &(table->ACPILevel.SclkSetting)); table 1139 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ACPILevel.DeepSleepDivId = 0; table 1140 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ACPILevel.CcPwrDynRm = 0; table 1141 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ACPILevel.CcPwrDynRm1 = 0; table 1143 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); table 1144 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); table 1145 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); table 1146 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); table 1148 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency); table 1149 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int); table 1150 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac); table 1151 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int); table 1152 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate); table 1153 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate); table 1154 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate); table 1155 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int); table 1156 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); table 1157 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); table 1161 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; table 1164 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.MclkFrequency, table 1165 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c &table->MemoryACPILevel.MinVoltage, &mvdd); table 1183 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); table 1185 
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.MinMvdd = 0; table 1187 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.StutterEnable = false; table 1189 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.EnabledForThrottle = 0; table 1190 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.EnabledForActivity = 0; table 1191 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.UpHyst = 0; table 1192 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.DownHyst = 100; table 1193 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.VoltageDownHyst = 0; table 1194 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryACPILevel.ActivityLevel = table 1197 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); table 1198 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); table 1204 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c SMU75_Discrete_DpmTable *table) table 1216 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceLevelCount = (uint8_t)(mm_table->count); table 1217 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceBootLevel = 0; table 1219 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c for (count = 0; count < table->VceLevelCount; count++) { table 1220 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceLevel[count].Frequency = mm_table->entries[count].eclk; table 1221 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceLevel[count].MinVoltage = 0; table 1222 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceLevel[count].MinVoltage |= table 1234 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceLevel[count].MinVoltage |= table 1236 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; table 1240 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceLevel[count].Frequency, &dividers); table 1245 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; table 1247 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); table 1248 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); table 1317 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c struct SMU75_Discrete_DpmTable *table) table 1329 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevelCount = (uint8_t)(mm_table->count); table 1330 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdBootLevel = 0; table 1332 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c for (count = 0; count < table->UvdLevelCount; count++) { table 1333 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].MinVoltage = 0; table 1334 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; table 1335 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; table 1336 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].MinVoltage |= table 1347 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) 
<< VDDCI_SHIFT; table 1348 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; table 1352 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].VclkFrequency, &dividers); table 1356 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; table 1359 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].DclkFrequency, &dividers); table 1363 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; table 1365 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); table 1366 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); table 1367 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); table 1374 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c struct SMU75_Discrete_DpmTable *table) table 1379 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->GraphicsBootLevel = 0; table 1380 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryBootLevel = 0; table 1385 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (uint32_t *)&(table->GraphicsBootLevel)); table 1389 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (uint32_t *)&(table->MemoryBootLevel)); table 1391 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BootVddc = data->vbios_boot_state.vddc_bootup_value * table 1393 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BootVddci = data->vbios_boot_state.vddci_bootup_value * table 1395 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * table 1398 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); table 1399 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); table 1400 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); table 1447 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 1457 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); table 1458 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); table 1464 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( table 1466 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( table 1468 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->FanGainEdge = PP_HOST_TO_SMC_US( table 1470 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->FanGainHotspot = PP_HOST_TO_SMC_US( table 1479 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); table 1480 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); table 1573 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 1591 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BTCGB_VDROOP_TABLE[0].a0 = table 1593 
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BTCGB_VDROOP_TABLE[0].a1 = table 1595 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BTCGB_VDROOP_TABLE[0].a2 = table 1597 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BTCGB_VDROOP_TABLE[1].a0 = table 1599 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BTCGB_VDROOP_TABLE[1].a1 = table 1601 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->BTCGB_VDROOP_TABLE[1].a2 = table 1603 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[0].m1 = table 1605 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[0].m2 = table 1607 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[0].b = table 1609 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[0].m1_shift = 24; table 1610 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[0].m2_shift = 12; table 1611 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[1].m1 = table 1613 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[1].m2 = table 1615 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[1].b = table 1617 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[1].m1_shift = 24; table 1618 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AVFSGB_FUSE_TABLE[1].m2_shift = 12; table 1619 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); table 1675 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c struct SMU75_Discrete_DpmTable *table) table 1683 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); table 1688 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig |= config; table 1697 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); table 1700 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); table 1703 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); table 1709 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig |= (config << VRCONF_MVDD_SHIFT); table 1719 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig = (config << VRCONF_MVDD_SHIFT); table 1723 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig = (config << VRCONF_MVDD_SHIFT); table 1731 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig |= (config << VRCONF_MVDD_SHIFT); table 1931 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c struct SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); table 1944 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c vegam_populate_smc_voltage_tables(hwmgr, table); table 1946 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SystemFlags = 0; table 1949 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 1953 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 1956 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 1959 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c result = vegam_populate_ulv_state(hwmgr, table); table 1966 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c result = 
vegam_populate_smc_link_level(hwmgr, table); table 1978 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c result = vegam_populate_smc_acpi_level(hwmgr, table); table 1982 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c result = vegam_populate_smc_vce_level(hwmgr, table); table 1994 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c result = vegam_populate_smc_uvd_level(hwmgr, table); table 1998 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c result = vegam_populate_smc_boot_level(hwmgr, table); table 2022 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->CurrSclkPllRange = 0xff; table 2023 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->GraphicsVoltageChangeEnable = 1; table 2024 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->GraphicsThermThrottleEnable = 1; table 2025 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->GraphicsInterval = 1; table 2026 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VoltageInterval = 1; table 2027 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ThermalInterval = 1; table 2028 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->TemperatureLimitHigh = table 2031 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->TemperatureLimitLow = table 2034 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryVoltageChangeEnable = 1; table 2035 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryInterval = 1; table 2036 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VoltageResponseTime = 0; table 2037 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->PhaseResponseTime = 0; table 2038 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->MemoryThermThrottleEnable = 1; table 2043 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->PCIeBootLinkLevel = table 2045 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->PCIeGenInterval = 1; table 2046 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRConfig = 0; table 2048 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c result = vegam_populate_vr_config(hwmgr, table); table 2052 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ThermGpio = 17; table 2053 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->SclkStepSize = 0x4000; table 2057 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; table 2059 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRHotLevel = table 2062 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; table 2069 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; table 2076 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; table 2084 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; table 2091 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ThermOutPolarity = table 2094 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; table 2101 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; table 2103 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ThermOutGpio = 17; table 2104 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ThermOutPolarity = 1; table 2105 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->ThermOutMode = 
SMU7_THERM_OUT_MODE_DISABLE; table 2117 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->Ulv.BifSclkDfs = table 2120 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->LinkLevel[i - 1].BifSclkDfs = table 2125 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); table 2127 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); table 2128 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); table 2129 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); table 2130 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); table 2131 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); table 2132 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); table 2133 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); table 2134 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); table 2135 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); table 2136 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); table 2142 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (uint8_t *)&(table->SystemFlags), table 2295 drivers/gpu/drm/amd/powerplay/vega20_ppt.c static uint32_t vega20_find_lowest_dpm_level(struct vega20_single_dpm_table *table) table 2299 drivers/gpu/drm/amd/powerplay/vega20_ppt.c for (i = 0; i < table->count; i++) { table 2300 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (table->dpm_levels[i].enabled) table 2303 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (i >= table->count) { table 2305 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->dpm_levels[i].enabled = true; table 2311 drivers/gpu/drm/amd/powerplay/vega20_ppt.c static uint32_t vega20_find_highest_dpm_level(struct vega20_single_dpm_table *table) table 2315 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (!table) { table 2319 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (table->count <= 0) { table 2323 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (table->count > MAX_REGULAR_DPM_NUMBER) { table 2328 drivers/gpu/drm/amd/powerplay/vega20_ppt.c for (i = table->count - 1; i >= 0; i--) { table 2329 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (table->dpm_levels[i].enabled) table 2334 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->dpm_levels[i].enabled = true; table 3066 drivers/gpu/drm/amd/powerplay/vega20_ppt.c Watermarks_t *table = watermarks; table 3068 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (!table || !clock_ranges) table 3076 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[1][i].MinClock = table 3080 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[1][i].MaxClock = table 3084 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[1][i].MinUclk = table 3088 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[1][i].MaxUclk = table 3092 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[1][i].WmSetting = (uint8_t) table 3097 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[0][i].MinClock = table 3101 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[0][i].MaxClock = table 3105 
drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[0][i].MinUclk = table 3109 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[0][i].MaxUclk = table 3113 drivers/gpu/drm/amd/powerplay/vega20_ppt.c table->WatermarkRow[0][i].WmSetting = (uint8_t) table 509 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c struct komeda_format_caps_table *table = &mdev->fmt_tbl; table 511 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c table->format_caps = d71_format_caps_table; table 512 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c table->format_mod_supported = d71_format_mod_supported; table 513 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c table->n_formats = ARRAY_SIZE(d71_format_caps_table); table 13 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c komeda_get_format_caps(struct komeda_format_caps_table *table, table 21 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c for (id = 0; id < table->n_formats; id++) { table 22 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c caps = &table->format_caps[id]; table 95 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c bool komeda_format_mod_supported(struct komeda_format_caps_table *table, table 101 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c caps = komeda_get_format_caps(table, fourcc, modifier); table 108 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c if (table->format_mod_supported) table 109 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c return table->format_mod_supported(caps, layer_type, modifier, table 115 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, table 122 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c fmts = kcalloc(table->n_formats, sizeof(u32), GFP_KERNEL); table 126 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c for (i = 0; i < table->n_formats; i++) { table 127 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c cap = &table->format_caps[i]; table 97 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h komeda_get_format_caps(struct komeda_format_caps_table *table, table 103 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, table 108 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h bool komeda_format_mod_supported(struct komeda_format_caps_table *table, table 50 drivers/gpu/drm/drm_hashtab.c ht->table = NULL; table 51 drivers/gpu/drm/drm_hashtab.c if (size <= PAGE_SIZE / sizeof(*ht->table)) table 52 drivers/gpu/drm/drm_hashtab.c ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); table 54 drivers/gpu/drm/drm_hashtab.c ht->table = vzalloc(array_size(size, sizeof(*ht->table))); table 55 drivers/gpu/drm/drm_hashtab.c if (!ht->table) { table 72 drivers/gpu/drm/drm_hashtab.c h_list = &ht->table[hashed_key]; table 85 drivers/gpu/drm/drm_hashtab.c h_list = &ht->table[hashed_key]; table 103 drivers/gpu/drm/drm_hashtab.c h_list = &ht->table[hashed_key]; table 122 drivers/gpu/drm/drm_hashtab.c h_list = &ht->table[hashed_key]; table 204 drivers/gpu/drm/drm_hashtab.c if (ht->table) { table 205 drivers/gpu/drm/drm_hashtab.c kvfree(ht->table); table 206 drivers/gpu/drm/drm_hashtab.c ht->table = NULL; table 40 drivers/gpu/drm/i915/gt/intel_mocs.c const struct drm_i915_mocs_entry *table; table 283 drivers/gpu/drm/i915/gt/intel_mocs.c struct drm_i915_mocs_table *table) table 289 drivers/gpu/drm/i915/gt/intel_mocs.c table->size = ARRAY_SIZE(tigerlake_mocs_table); table 290 
drivers/gpu/drm/i915/gt/intel_mocs.c table->table = tigerlake_mocs_table; table 291 drivers/gpu/drm/i915/gt/intel_mocs.c table->n_entries = GEN11_NUM_MOCS_ENTRIES; table 294 drivers/gpu/drm/i915/gt/intel_mocs.c table->size = ARRAY_SIZE(icelake_mocs_table); table 295 drivers/gpu/drm/i915/gt/intel_mocs.c table->table = icelake_mocs_table; table 296 drivers/gpu/drm/i915/gt/intel_mocs.c table->n_entries = GEN11_NUM_MOCS_ENTRIES; table 299 drivers/gpu/drm/i915/gt/intel_mocs.c table->size = ARRAY_SIZE(skylake_mocs_table); table 300 drivers/gpu/drm/i915/gt/intel_mocs.c table->n_entries = GEN9_NUM_MOCS_ENTRIES; table 301 drivers/gpu/drm/i915/gt/intel_mocs.c table->table = skylake_mocs_table; table 304 drivers/gpu/drm/i915/gt/intel_mocs.c table->size = ARRAY_SIZE(broxton_mocs_table); table 305 drivers/gpu/drm/i915/gt/intel_mocs.c table->n_entries = GEN9_NUM_MOCS_ENTRIES; table 306 drivers/gpu/drm/i915/gt/intel_mocs.c table->table = broxton_mocs_table; table 317 drivers/gpu/drm/i915/gt/intel_mocs.c for (i = 0; i < table->size; i++) table 318 drivers/gpu/drm/i915/gt/intel_mocs.c if (WARN_ON(table->table[i].l3cc_value & table 351 drivers/gpu/drm/i915/gt/intel_mocs.c static u32 get_entry_control(const struct drm_i915_mocs_table *table, table 354 drivers/gpu/drm/i915/gt/intel_mocs.c if (table->table[index].used) table 355 drivers/gpu/drm/i915/gt/intel_mocs.c return table->table[index].control_value; table 357 drivers/gpu/drm/i915/gt/intel_mocs.c return table->table[I915_MOCS_PTE].control_value; table 371 drivers/gpu/drm/i915/gt/intel_mocs.c struct drm_i915_mocs_table table; table 382 drivers/gpu/drm/i915/gt/intel_mocs.c if (!get_mocs_settings(gt, &table)) table 386 drivers/gpu/drm/i915/gt/intel_mocs.c unused_value = table.table[I915_MOCS_PTE].control_value; table 388 drivers/gpu/drm/i915/gt/intel_mocs.c for (index = 0; index < table.size; index++) { table 389 drivers/gpu/drm/i915/gt/intel_mocs.c u32 value = get_entry_control(&table, index); table 397 drivers/gpu/drm/i915/gt/intel_mocs.c for (; index < table.n_entries; index++) table 406 drivers/gpu/drm/i915/gt/intel_mocs.c struct drm_i915_mocs_table table; table 411 drivers/gpu/drm/i915/gt/intel_mocs.c if (!get_mocs_settings(gt, &table)) table 414 drivers/gpu/drm/i915/gt/intel_mocs.c if (GEM_DEBUG_WARN_ON(table.size > table.n_entries)) table 417 drivers/gpu/drm/i915/gt/intel_mocs.c for (index = 0; index < table.size; index++) table 420 drivers/gpu/drm/i915/gt/intel_mocs.c table.table[index].control_value); table 427 drivers/gpu/drm/i915/gt/intel_mocs.c for (; index < table.n_entries; index++) table 430 drivers/gpu/drm/i915/gt/intel_mocs.c table.table[0].control_value); table 434 drivers/gpu/drm/i915/gt/intel_mocs.c const struct drm_i915_mocs_table *table) table 441 drivers/gpu/drm/i915/gt/intel_mocs.c if (GEM_WARN_ON(table->size > table->n_entries)) table 445 drivers/gpu/drm/i915/gt/intel_mocs.c unused_value = table->table[I915_MOCS_PTE].control_value; table 447 drivers/gpu/drm/i915/gt/intel_mocs.c cs = intel_ring_begin(rq, 2 + 2 * table->n_entries); table 451 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries); table 453 drivers/gpu/drm/i915/gt/intel_mocs.c for (index = 0; index < table->size; index++) { table 454 drivers/gpu/drm/i915/gt/intel_mocs.c u32 value = get_entry_control(table, index); table 461 drivers/gpu/drm/i915/gt/intel_mocs.c for (; index < table->n_entries; index++) { table 476 drivers/gpu/drm/i915/gt/intel_mocs.c static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table, table 479 
drivers/gpu/drm/i915/gt/intel_mocs.c if (table->table[index].used) table 480 drivers/gpu/drm/i915/gt/intel_mocs.c return table->table[index].l3cc_value; table 482 drivers/gpu/drm/i915/gt/intel_mocs.c return table->table[I915_MOCS_PTE].l3cc_value; table 485 drivers/gpu/drm/i915/gt/intel_mocs.c static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table, table 493 drivers/gpu/drm/i915/gt/intel_mocs.c const struct drm_i915_mocs_table *table) table 499 drivers/gpu/drm/i915/gt/intel_mocs.c if (GEM_WARN_ON(table->size > table->n_entries)) table 503 drivers/gpu/drm/i915/gt/intel_mocs.c unused_value = table->table[I915_MOCS_PTE].l3cc_value; table 505 drivers/gpu/drm/i915/gt/intel_mocs.c cs = intel_ring_begin(rq, 2 + table->n_entries); table 509 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2); table 511 drivers/gpu/drm/i915/gt/intel_mocs.c for (i = 0; i < table->size / 2; i++) { table 512 drivers/gpu/drm/i915/gt/intel_mocs.c u16 low = get_entry_l3cc(table, 2 * i); table 513 drivers/gpu/drm/i915/gt/intel_mocs.c u16 high = get_entry_l3cc(table, 2 * i + 1); table 516 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = l3cc_combine(table, low, high); table 520 drivers/gpu/drm/i915/gt/intel_mocs.c if (table->size & 0x01) { table 521 drivers/gpu/drm/i915/gt/intel_mocs.c u16 low = get_entry_l3cc(table, 2 * i); table 524 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = l3cc_combine(table, low, unused_value); table 529 drivers/gpu/drm/i915/gt/intel_mocs.c for (; i < table->n_entries / 2; i++) { table 531 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = l3cc_combine(table, unused_value, unused_value); table 543 drivers/gpu/drm/i915/gt/intel_mocs.c struct drm_i915_mocs_table table; table 547 drivers/gpu/drm/i915/gt/intel_mocs.c if (!get_mocs_settings(gt, &table)) table 551 drivers/gpu/drm/i915/gt/intel_mocs.c unused_value = table.table[I915_MOCS_PTE].l3cc_value; table 553 drivers/gpu/drm/i915/gt/intel_mocs.c for (i = 0; i < table.size / 2; i++) { table 554 drivers/gpu/drm/i915/gt/intel_mocs.c u16 low = get_entry_l3cc(&table, 2 * i); table 555 drivers/gpu/drm/i915/gt/intel_mocs.c u16 high = get_entry_l3cc(&table, 2 * i + 1); table 559 drivers/gpu/drm/i915/gt/intel_mocs.c l3cc_combine(&table, low, high)); table 563 drivers/gpu/drm/i915/gt/intel_mocs.c if (table.size & 0x01) { table 564 drivers/gpu/drm/i915/gt/intel_mocs.c u16 low = get_entry_l3cc(&table, 2 * i); table 568 drivers/gpu/drm/i915/gt/intel_mocs.c l3cc_combine(&table, low, unused_value)); table 573 drivers/gpu/drm/i915/gt/intel_mocs.c for (; i < table.n_entries / 2; i++) table 576 drivers/gpu/drm/i915/gt/intel_mocs.c l3cc_combine(&table, unused_value, table 179 drivers/gpu/drm/i915/i915_cmd_parser.c const struct drm_i915_cmd_descriptor *table; table 796 drivers/gpu/drm/i915/i915_cmd_parser.c const struct drm_i915_cmd_table *table = &cmd_tables[i]; table 800 drivers/gpu/drm/i915/i915_cmd_parser.c for (j = 0; j < table->count; j++) { table 802 drivers/gpu/drm/i915/i915_cmd_parser.c &table->table[j]; table 848 drivers/gpu/drm/i915/i915_cmd_parser.c const struct drm_i915_reg_table *table; table 851 drivers/gpu/drm/i915/i915_cmd_parser.c table = &engine->reg_tables[i]; table 852 drivers/gpu/drm/i915/i915_cmd_parser.c if (!check_sorted(engine, table->regs, table->num_regs)) table 896 drivers/gpu/drm/i915/i915_cmd_parser.c const struct drm_i915_cmd_table *table = &cmd_tables[i]; table 898 drivers/gpu/drm/i915/i915_cmd_parser.c for (j = 0; j < table->count; j++) { table 900 drivers/gpu/drm/i915/i915_cmd_parser.c 
&table->table[j]; table 1098 drivers/gpu/drm/i915/i915_cmd_parser.c __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr) table 1103 drivers/gpu/drm/i915/i915_cmd_parser.c int ret = addr - i915_mmio_reg_offset(table[mid].addr); table 1109 drivers/gpu/drm/i915/i915_cmd_parser.c return &table[mid]; table 1117 drivers/gpu/drm/i915/i915_cmd_parser.c const struct drm_i915_reg_table *table = engine->reg_tables; table 1121 drivers/gpu/drm/i915/i915_cmd_parser.c for (; !reg && (count > 0); ++table, --count) table 1122 drivers/gpu/drm/i915/i915_cmd_parser.c reg = __find_reg(table->regs, table->num_regs, addr); table 354 drivers/gpu/drm/msm/adreno/a6xx_hfi.c struct a6xx_hfi_queue_table_header *table = hfi->virt; table 355 drivers/gpu/drm/msm/adreno/a6xx_hfi.c struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table); table 363 drivers/gpu/drm/msm/adreno/a6xx_hfi.c table_size = sizeof(*table); table 367 drivers/gpu/drm/msm/adreno/a6xx_hfi.c table->version = 0; table 368 drivers/gpu/drm/msm/adreno/a6xx_hfi.c table->size = table_size; table 370 drivers/gpu/drm/msm/adreno/a6xx_hfi.c table->qhdr0_offset = sizeof(*table) >> 2; table 371 drivers/gpu/drm/msm/adreno/a6xx_hfi.c table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2; table 372 drivers/gpu/drm/msm/adreno/a6xx_hfi.c table->num_queues = ARRAY_SIZE(gmu->queues); table 373 drivers/gpu/drm/msm/adreno/a6xx_hfi.c table->active_queues = ARRAY_SIZE(gmu->queues); table 15 drivers/gpu/drm/msm/msm_gpummu.c uint32_t *table; table 52 drivers/gpu/drm/msm/msm_gpummu.c gpummu->table[idx] = addr | prot_bits; table 71 drivers/gpu/drm/msm/msm_gpummu.c gpummu->table[idx] = 0; table 83 drivers/gpu/drm/msm/msm_gpummu.c dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base, table 105 drivers/gpu/drm/msm/msm_gpummu.c gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base, table 107 drivers/gpu/drm/msm/msm_gpummu.c if (!gpummu->table) { table 167 drivers/gpu/drm/nouveau/dispnv04/disp.h nouveau_bios_run_init_table(struct drm_device *dev, u16 table, table 170 drivers/gpu/drm/nouveau/dispnv04/disp.h nvbios_init(&nvxx_bios(&nouveau_drm(dev)->client.device)->subdev, table, table 999 drivers/gpu/drm/nouveau/nouveau_bios.c struct bit_table *table) table 1005 drivers/gpu/drm/nouveau/nouveau_bios.c if (bit_table(dev, table->id, &bitentry) == 0) table 1006 drivers/gpu/drm/nouveau/nouveau_bios.c return table->parse_fn(dev, bios, &bitentry); table 1008 drivers/gpu/drm/nouveau/nouveau_bios.c NV_INFO(drm, "BIT table '%c' not found\n", table->id); table 64 drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c u32 table, entry; table 66 drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len); table 67 drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c if (!table || !cnt) table 87 drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c entry = table + hdr + i * len; table 464 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u16 table = init_xlat_table(init); table 465 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c if (table) { table 466 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u16 data = nvbios_rd16(bios, table + (index * 2)); table 482 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u16 table = init_condition_table(init); table 483 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c if (table) { table 484 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u32 reg = nvbios_rd32(bios, table + (cond * 12) + 0); table 485 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u32 
msk = nvbios_rd32(bios, table + (cond * 12) + 4); table 486 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u32 val = nvbios_rd32(bios, table + (cond * 12) + 8); table 498 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u16 table = init_io_condition_table(init); table 499 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c if (table) { table 500 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u16 port = nvbios_rd16(bios, table + (cond * 5) + 0); table 501 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u8 index = nvbios_rd08(bios, table + (cond * 5) + 2); table 502 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u8 mask = nvbios_rd08(bios, table + (cond * 5) + 3); table 503 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u8 value = nvbios_rd08(bios, table + (cond * 5) + 4); table 515 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u16 table = init_io_flag_condition_table(init); table 516 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c if (table) { table 517 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u16 port = nvbios_rd16(bios, table + (cond * 9) + 0); table 518 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u8 index = nvbios_rd08(bios, table + (cond * 9) + 2); table 519 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u8 mask = nvbios_rd08(bios, table + (cond * 9) + 3); table 520 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u8 shift = nvbios_rd08(bios, table + (cond * 9) + 4); table 521 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u16 data = nvbios_rd16(bios, table + (cond * 9) + 5); table 522 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u8 dmask = nvbios_rd08(bios, table + (cond * 9) + 7); table 523 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u8 value = nvbios_rd08(bios, table + (cond * 9) + 8); table 1731 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u16 table; table 1735 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c table = init_macro_table(init); table 1736 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c if (table) { table 1737 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u32 addr = nvbios_rd32(bios, table + (macro * 8) + 0); table 1738 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c u32 data = nvbios_rd32(bios, table + (macro * 8) + 4); table 3803 drivers/gpu/drm/omapdrm/dss/dispc.c u32 *table = dispc->gamma_table[channel]; table 3809 drivers/gpu/drm/omapdrm/dss/dispc.c u32 v = table[i]; table 3849 drivers/gpu/drm/omapdrm/dss/dispc.c u32 *table = dispc->gamma_table[channel]; table 3882 drivers/gpu/drm/omapdrm/dss/dispc.c table[first + j] = (r << (gdesc->bits * 2)) | table 52 drivers/gpu/drm/qxl/qxl_prime.c struct sg_table *table) table 1175 drivers/gpu/drm/radeon/btc_dpm.c void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, table 1180 drivers/gpu/drm/radeon/btc_dpm.c if ((table == NULL) || (table->count == 0)) { table 1185 drivers/gpu/drm/radeon/btc_dpm.c for (i = 0; i < table->count; i++) { table 1186 drivers/gpu/drm/radeon/btc_dpm.c if (clock < table->entries[i].clk) table 1187 drivers/gpu/drm/radeon/btc_dpm.c clock = table->entries[i].clk; table 1192 drivers/gpu/drm/radeon/btc_dpm.c void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, table 1197 drivers/gpu/drm/radeon/btc_dpm.c if ((table == NULL) || (table->count == 0)) table 1200 drivers/gpu/drm/radeon/btc_dpm.c for (i= 0; i < table->count; i++) { table 1201 drivers/gpu/drm/radeon/btc_dpm.c if (clock <= table->entries[i].clk) { table 1202 drivers/gpu/drm/radeon/btc_dpm.c if (*voltage < table->entries[i].v) table 
1203 drivers/gpu/drm/radeon/btc_dpm.c *voltage = (u16)((table->entries[i].v < max_voltage) ? table 1204 drivers/gpu/drm/radeon/btc_dpm.c table->entries[i].v : max_voltage); table 1297 drivers/gpu/drm/radeon/btc_dpm.c static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage) table 1301 drivers/gpu/drm/radeon/btc_dpm.c for (i = 0; i < table->count; i++) { table 1302 drivers/gpu/drm/radeon/btc_dpm.c if (voltage <= table->entries[i].value) table 1303 drivers/gpu/drm/radeon/btc_dpm.c return table->entries[i].value; table 1306 drivers/gpu/drm/radeon/btc_dpm.c return table->entries[table->count - 1].value; table 1399 drivers/gpu/drm/radeon/btc_dpm.c RV770_SMC_STATETABLE *table) table 1408 drivers/gpu/drm/radeon/btc_dpm.c &table->ULVState.levels[0], table 1411 drivers/gpu/drm/radeon/btc_dpm.c table->ULVState.levels[0].arbValue = MC_CG_ARB_FREQ_F0; table 1412 drivers/gpu/drm/radeon/btc_dpm.c table->ULVState.levels[0].ACIndex = 1; table 1414 drivers/gpu/drm/radeon/btc_dpm.c table->ULVState.levels[1] = table->ULVState.levels[0]; table 1415 drivers/gpu/drm/radeon/btc_dpm.c table->ULVState.levels[2] = table->ULVState.levels[0]; table 1417 drivers/gpu/drm/radeon/btc_dpm.c table->ULVState.flags |= PPSMC_SWSTATE_FLAG_DC; table 1428 drivers/gpu/drm/radeon/btc_dpm.c RV770_SMC_STATETABLE *table) table 1430 drivers/gpu/drm/radeon/btc_dpm.c int ret = cypress_populate_smc_acpi_state(rdev, table); table 1433 drivers/gpu/drm/radeon/btc_dpm.c table->ACPIState.levels[0].ACIndex = 0; table 1434 drivers/gpu/drm/radeon/btc_dpm.c table->ACPIState.levels[1].ACIndex = 0; table 1435 drivers/gpu/drm/radeon/btc_dpm.c table->ACPIState.levels[2].ACIndex = 0; table 1633 drivers/gpu/drm/radeon/btc_dpm.c RV770_SMC_STATETABLE *table = &pi->smc_statetable; table 1636 drivers/gpu/drm/radeon/btc_dpm.c memset(table, 0, sizeof(RV770_SMC_STATETABLE)); table 1638 drivers/gpu/drm/radeon/btc_dpm.c cypress_populate_smc_voltage_tables(rdev, table); table 1643 drivers/gpu/drm/radeon/btc_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; table 1646 drivers/gpu/drm/radeon/btc_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; table 1649 drivers/gpu/drm/radeon/btc_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; table 1654 drivers/gpu/drm/radeon/btc_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 1657 drivers/gpu/drm/radeon/btc_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; table 1660 drivers/gpu/drm/radeon/btc_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 1663 drivers/gpu/drm/radeon/btc_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 1665 drivers/gpu/drm/radeon/btc_dpm.c ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table); table 1673 drivers/gpu/drm/radeon/btc_dpm.c ret = btc_populate_smc_acpi_state(rdev, table); table 1678 drivers/gpu/drm/radeon/btc_dpm.c ret = btc_populate_ulv_state(rdev, table); table 1683 drivers/gpu/drm/radeon/btc_dpm.c table->driverState = table->initialState; table 1687 drivers/gpu/drm/radeon/btc_dpm.c (u8 *)table, table 1902 drivers/gpu/drm/radeon/btc_dpm.c static void btc_set_valid_flag(struct evergreen_mc_reg_table *table) table 1906 drivers/gpu/drm/radeon/btc_dpm.c for (i = 0; i < table->last; i++) { table 1907 drivers/gpu/drm/radeon/btc_dpm.c for (j = 1; j < table->num_entries; j++) { table 1908 drivers/gpu/drm/radeon/btc_dpm.c if (table->mc_reg_table_entry[j-1].mc_data[i] != table 1909 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_table_entry[j].mc_data[i]) { table 1910 
drivers/gpu/drm/radeon/btc_dpm.c table->valid_flag |= (1 << i); table 1918 drivers/gpu/drm/radeon/btc_dpm.c struct evergreen_mc_reg_table *table) table 1924 drivers/gpu/drm/radeon/btc_dpm.c for (i = 0, j = table->last; i < table->last; i++) { table 1925 drivers/gpu/drm/radeon/btc_dpm.c switch (table->mc_reg_address[i].s1) { table 1928 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; table 1929 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; table 1930 drivers/gpu/drm/radeon/btc_dpm.c for (k = 0; k < table->num_entries; k++) { table 1931 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 1933 drivers/gpu/drm/radeon/btc_dpm.c ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); table 1941 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; table 1942 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; table 1943 drivers/gpu/drm/radeon/btc_dpm.c for (k = 0; k < table->num_entries; k++) { table 1944 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 1946 drivers/gpu/drm/radeon/btc_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 1948 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_table_entry[k].mc_data[j] |= 0x100; table 1957 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; table 1958 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; table 1959 drivers/gpu/drm/radeon/btc_dpm.c for (k = 0; k < table->num_entries; k++) { table 1960 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 1962 drivers/gpu/drm/radeon/btc_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 1974 drivers/gpu/drm/radeon/btc_dpm.c table->last = j; table 1979 drivers/gpu/drm/radeon/btc_dpm.c static void btc_set_s0_mc_reg_index(struct evergreen_mc_reg_table *table) table 1984 drivers/gpu/drm/radeon/btc_dpm.c for (i = 0; i < table->last; i++) { table 1985 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_address[i].s0 = table 1986 drivers/gpu/drm/radeon/btc_dpm.c btc_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 
table 1987 drivers/gpu/drm/radeon/btc_dpm.c address : table->mc_reg_address[i].s1; table 1991 drivers/gpu/drm/radeon/btc_dpm.c static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, table 1996 drivers/gpu/drm/radeon/btc_dpm.c if (table->last > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) table 1999 drivers/gpu/drm/radeon/btc_dpm.c if (table->num_entries > MAX_AC_TIMING_ENTRIES) table 2002 drivers/gpu/drm/radeon/btc_dpm.c for (i = 0; i < table->last; i++) table 2003 drivers/gpu/drm/radeon/btc_dpm.c eg_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; table 2004 drivers/gpu/drm/radeon/btc_dpm.c eg_table->last = table->last; table 2006 drivers/gpu/drm/radeon/btc_dpm.c for (i = 0; i < table->num_entries; i++) { table 2008 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_table_entry[i].mclk_max; table 2009 drivers/gpu/drm/radeon/btc_dpm.c for(j = 0; j < table->last; j++) table 2011 drivers/gpu/drm/radeon/btc_dpm.c table->mc_reg_table_entry[i].mc_data[j]; table 2013 drivers/gpu/drm/radeon/btc_dpm.c eg_table->num_entries = table->num_entries; table 2021 drivers/gpu/drm/radeon/btc_dpm.c struct atom_mc_reg_table *table; table 2026 drivers/gpu/drm/radeon/btc_dpm.c table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); table 2027 drivers/gpu/drm/radeon/btc_dpm.c if (!table) table 2043 drivers/gpu/drm/radeon/btc_dpm.c ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); table 2048 drivers/gpu/drm/radeon/btc_dpm.c ret = btc_copy_vbios_mc_reg_table(table, eg_table); table 2062 drivers/gpu/drm/radeon/btc_dpm.c kfree(table); table 50 drivers/gpu/drm/radeon/btc_dpm.h void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, table 52 drivers/gpu/drm/radeon/btc_dpm.h void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, table 1305 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table = &pi->smc_state_table; table 1311 drivers/gpu/drm/radeon/ci_dpm.c table->FpsHighT = cpu_to_be16(tmp); table 1314 drivers/gpu/drm/radeon/ci_dpm.c table->FpsLowT = cpu_to_be16(tmp); table 2219 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table 2224 drivers/gpu/drm/radeon/ci_dpm.c table->VddcLevelCount = pi->vddc_voltage_table.count; table 2225 drivers/gpu/drm/radeon/ci_dpm.c for (count = 0; count < table->VddcLevelCount; count++) { table 2228 drivers/gpu/drm/radeon/ci_dpm.c &table->VddcLevel[count]); table 2231 drivers/gpu/drm/radeon/ci_dpm.c table->VddcLevel[count].Smio |= table 2234 drivers/gpu/drm/radeon/ci_dpm.c table->VddcLevel[count].Smio = 0; table 2236 drivers/gpu/drm/radeon/ci_dpm.c table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount); table 2242 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table 2247 drivers/gpu/drm/radeon/ci_dpm.c table->VddciLevelCount = pi->vddci_voltage_table.count; table 2248 drivers/gpu/drm/radeon/ci_dpm.c for (count = 0; count < table->VddciLevelCount; count++) { table 2251 drivers/gpu/drm/radeon/ci_dpm.c &table->VddciLevel[count]); table 2254 drivers/gpu/drm/radeon/ci_dpm.c table->VddciLevel[count].Smio |= table 2257 drivers/gpu/drm/radeon/ci_dpm.c table->VddciLevel[count].Smio = 0; table 2259 drivers/gpu/drm/radeon/ci_dpm.c table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount); table 2265 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table 2270 drivers/gpu/drm/radeon/ci_dpm.c table->MvddLevelCount = pi->mvdd_voltage_table.count; table 2271 drivers/gpu/drm/radeon/ci_dpm.c for (count = 0; count < 
table->MvddLevelCount; count++) { table 2274 drivers/gpu/drm/radeon/ci_dpm.c &table->MvddLevel[count]); table 2277 drivers/gpu/drm/radeon/ci_dpm.c table->MvddLevel[count].Smio |= table 2280 drivers/gpu/drm/radeon/ci_dpm.c table->MvddLevel[count].Smio = 0; table 2282 drivers/gpu/drm/radeon/ci_dpm.c table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount); table 2288 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table 2292 drivers/gpu/drm/radeon/ci_dpm.c ret = ci_populate_smc_vddc_table(rdev, table); table 2296 drivers/gpu/drm/radeon/ci_dpm.c ret = ci_populate_smc_vddci_table(rdev, table); table 2300 drivers/gpu/drm/radeon/ci_dpm.c ret = ci_populate_smc_mvdd_table(rdev, table); table 2627 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table 2634 drivers/gpu/drm/radeon/ci_dpm.c table->LinkLevel[i].PcieGenSpeed = table 2636 drivers/gpu/drm/radeon/ci_dpm.c table->LinkLevel[i].PcieLaneCount = table 2638 drivers/gpu/drm/radeon/ci_dpm.c table->LinkLevel[i].EnabledForActivity = 1; table 2639 drivers/gpu/drm/radeon/ci_dpm.c table->LinkLevel[i].DownT = cpu_to_be32(5); table 2640 drivers/gpu/drm/radeon/ci_dpm.c table->LinkLevel[i].UpT = cpu_to_be32(30); table 2649 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table 2655 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevelCount = table 2658 drivers/gpu/drm/radeon/ci_dpm.c for (count = 0; count < table->UvdLevelCount; count++) { table 2659 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].VclkFrequency = table 2661 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].DclkFrequency = table 2663 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].MinVddc = table 2665 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].MinVddcPhases = 1; table 2669 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].VclkFrequency, false, &dividers); table 2673 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider; table 2677 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].DclkFrequency, false, &dividers); table 2681 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider; table 2683 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency); table 2684 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency); table 2685 drivers/gpu/drm/radeon/ci_dpm.c table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc); table 2692 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table 2698 drivers/gpu/drm/radeon/ci_dpm.c table->VceLevelCount = table 2701 drivers/gpu/drm/radeon/ci_dpm.c for (count = 0; count < table->VceLevelCount; count++) { table 2702 drivers/gpu/drm/radeon/ci_dpm.c table->VceLevel[count].Frequency = table 2704 drivers/gpu/drm/radeon/ci_dpm.c table->VceLevel[count].MinVoltage = table 2706 drivers/gpu/drm/radeon/ci_dpm.c table->VceLevel[count].MinPhases = 1; table 2710 drivers/gpu/drm/radeon/ci_dpm.c table->VceLevel[count].Frequency, false, &dividers); table 2714 drivers/gpu/drm/radeon/ci_dpm.c table->VceLevel[count].Divider = (u8)dividers.post_divider; table 2716 drivers/gpu/drm/radeon/ci_dpm.c table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency); table 2717 drivers/gpu/drm/radeon/ci_dpm.c table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage); table 2725 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table
2731 drivers/gpu/drm/radeon/ci_dpm.c table->AcpLevelCount = (u8) table 2734 drivers/gpu/drm/radeon/ci_dpm.c for (count = 0; count < table->AcpLevelCount; count++) { table 2735 drivers/gpu/drm/radeon/ci_dpm.c table->AcpLevel[count].Frequency = table 2737 drivers/gpu/drm/radeon/ci_dpm.c table->AcpLevel[count].MinVoltage = table 2739 drivers/gpu/drm/radeon/ci_dpm.c table->AcpLevel[count].MinPhases = 1; table 2743 drivers/gpu/drm/radeon/ci_dpm.c table->AcpLevel[count].Frequency, false, &dividers); table 2747 drivers/gpu/drm/radeon/ci_dpm.c table->AcpLevel[count].Divider = (u8)dividers.post_divider; table 2749 drivers/gpu/drm/radeon/ci_dpm.c table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency); table 2750 drivers/gpu/drm/radeon/ci_dpm.c table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage); table 2757 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table 2763 drivers/gpu/drm/radeon/ci_dpm.c table->SamuLevelCount = table 2766 drivers/gpu/drm/radeon/ci_dpm.c for (count = 0; count < table->SamuLevelCount; count++) { table 2767 drivers/gpu/drm/radeon/ci_dpm.c table->SamuLevel[count].Frequency = table 2769 drivers/gpu/drm/radeon/ci_dpm.c table->SamuLevel[count].MinVoltage = table 2771 drivers/gpu/drm/radeon/ci_dpm.c table->SamuLevel[count].MinPhases = 1; table 2775 drivers/gpu/drm/radeon/ci_dpm.c table->SamuLevel[count].Frequency, false, &dividers); table 2779 drivers/gpu/drm/radeon/ci_dpm.c table->SamuLevel[count].Divider = (u8)dividers.post_divider; table 2781 drivers/gpu/drm/radeon/ci_dpm.c table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency); table 2782 drivers/gpu/drm/radeon/ci_dpm.c table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage); table 2990 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table) table 3001 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; table 3004 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE); table 3006 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE); table 3008 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ?
0 : 1; table 3010 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq; table 3014 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.SclkFrequency, false, &dividers); table 3018 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.SclkDid = (u8)dividers.post_divider; table 3019 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; table 3020 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.DeepSleepDivId = 0; table 3028 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; table 3029 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; table 3030 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3; table 3031 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4; table 3032 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum; table 3033 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2; table 3034 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CcPwrDynRm = 0; table 3035 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CcPwrDynRm1 = 0; table 3037 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags); table 3038 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases); table 3039 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency); table 3040 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl); table 3041 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2); table 3042 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3); table 3043 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4); table 3044 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum); table 3045 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2); table 3046 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm); table 3047 drivers/gpu/drm/radeon/ci_dpm.c table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1); table 3049 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; table 3050 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; table 3054 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MinVddci = table 3057 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MinVddci = table 3062 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MinMvdd = 0; table 3064 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MinMvdd = table 3072 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl); table 3073 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl); table 3074 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MpllAdFuncCntl = table 3076 drivers/gpu/drm/radeon/ci_dpm.c
table->MemoryACPILevel.MpllDqFuncCntl = table 3078 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MpllFuncCntl = table 3080 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MpllFuncCntl_1 = table 3082 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MpllFuncCntl_2 = table 3084 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1); table 3085 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2); table 3087 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.EnabledForThrottle = 0; table 3088 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.EnabledForActivity = 0; table 3089 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.UpH = 0; table 3090 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.DownH = 100; table 3091 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.VoltageDownH = 0; table 3092 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.ActivityLevel = table 3095 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.StutterEnable = false; table 3096 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.StrobeEnable = false; table 3097 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.EdcReadEnable = false; table 3098 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.EdcWriteEnable = false; table 3099 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryACPILevel.RttEnable = false; table 3539 drivers/gpu/drm/radeon/ci_dpm.c static int ci_find_boot_level(struct ci_single_dpm_table *table, table 3545 drivers/gpu/drm/radeon/ci_dpm.c for(i = 0; i < table->count; i++) { table 3546 drivers/gpu/drm/radeon/ci_dpm.c if (value == table->dpm_levels[i].value) { table 3560 drivers/gpu/drm/radeon/ci_dpm.c SMU7_Discrete_DpmTable *table = &pi->smc_state_table; table 3568 drivers/gpu/drm/radeon/ci_dpm.c ci_populate_smc_voltage_tables(rdev, table); table 3573 drivers/gpu/drm/radeon/ci_dpm.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 3576 drivers/gpu/drm/radeon/ci_dpm.c table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 3579 drivers/gpu/drm/radeon/ci_dpm.c table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 3596 drivers/gpu/drm/radeon/ci_dpm.c ci_populate_smc_link_level(rdev, table); table 3598 drivers/gpu/drm/radeon/ci_dpm.c ret = ci_populate_smc_acpi_level(rdev, table); table 3602 drivers/gpu/drm/radeon/ci_dpm.c ret = ci_populate_smc_vce_level(rdev, table); table 3606 drivers/gpu/drm/radeon/ci_dpm.c ret = ci_populate_smc_acp_level(rdev, table); table 3610 drivers/gpu/drm/radeon/ci_dpm.c ret = ci_populate_smc_samu_level(rdev, table); table 3618 drivers/gpu/drm/radeon/ci_dpm.c ret = ci_populate_smc_uvd_level(rdev, table); table 3622 drivers/gpu/drm/radeon/ci_dpm.c table->UvdBootLevel = 0; table 3623 drivers/gpu/drm/radeon/ci_dpm.c table->VceBootLevel = 0; table 3624 drivers/gpu/drm/radeon/ci_dpm.c table->AcpBootLevel = 0; table 3625 drivers/gpu/drm/radeon/ci_dpm.c table->SamuBootLevel = 0; table 3626 drivers/gpu/drm/radeon/ci_dpm.c table->GraphicsBootLevel = 0; table 3627 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryBootLevel = 0; table 3637 drivers/gpu/drm/radeon/ci_dpm.c table->BootVddc = pi->vbios_boot_state.vddc_bootup_value; table 3638 drivers/gpu/drm/radeon/ci_dpm.c table->BootVddci = pi->vbios_boot_state.vddci_bootup_value; table 3639 drivers/gpu/drm/radeon/ci_dpm.c table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value; table 3647 drivers/gpu/drm/radeon/ci_dpm.c table->UVDInterval = 1; table 3648 
drivers/gpu/drm/radeon/ci_dpm.c table->VCEInterval = 1; table 3649 drivers/gpu/drm/radeon/ci_dpm.c table->ACPInterval = 1; table 3650 drivers/gpu/drm/radeon/ci_dpm.c table->SAMUInterval = 1; table 3651 drivers/gpu/drm/radeon/ci_dpm.c table->GraphicsVoltageChangeEnable = 1; table 3652 drivers/gpu/drm/radeon/ci_dpm.c table->GraphicsThermThrottleEnable = 1; table 3653 drivers/gpu/drm/radeon/ci_dpm.c table->GraphicsInterval = 1; table 3654 drivers/gpu/drm/radeon/ci_dpm.c table->VoltageInterval = 1; table 3655 drivers/gpu/drm/radeon/ci_dpm.c table->ThermalInterval = 1; table 3656 drivers/gpu/drm/radeon/ci_dpm.c table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high * table 3658 drivers/gpu/drm/radeon/ci_dpm.c table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low * table 3660 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryVoltageChangeEnable = 1; table 3661 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryInterval = 1; table 3662 drivers/gpu/drm/radeon/ci_dpm.c table->VoltageResponseTime = 0; table 3663 drivers/gpu/drm/radeon/ci_dpm.c table->VddcVddciDelta = 4000; table 3664 drivers/gpu/drm/radeon/ci_dpm.c table->PhaseResponseTime = 0; table 3665 drivers/gpu/drm/radeon/ci_dpm.c table->MemoryThermThrottleEnable = 1; table 3666 drivers/gpu/drm/radeon/ci_dpm.c table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1; table 3667 drivers/gpu/drm/radeon/ci_dpm.c table->PCIeGenInterval = 1; table 3669 drivers/gpu/drm/radeon/ci_dpm.c table->SVI2Enable = 1; table 3671 drivers/gpu/drm/radeon/ci_dpm.c table->SVI2Enable = 0; table 3673 drivers/gpu/drm/radeon/ci_dpm.c table->ThermGpio = 17; table 3674 drivers/gpu/drm/radeon/ci_dpm.c table->SclkStepSize = 0x4000; table 3676 drivers/gpu/drm/radeon/ci_dpm.c table->SystemFlags = cpu_to_be32(table->SystemFlags); table 3677 drivers/gpu/drm/radeon/ci_dpm.c table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid); table 3678 drivers/gpu/drm/radeon/ci_dpm.c table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase); table 3679 drivers/gpu/drm/radeon/ci_dpm.c table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid); table 3680 drivers/gpu/drm/radeon/ci_dpm.c table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid); table 3681 drivers/gpu/drm/radeon/ci_dpm.c table->SclkStepSize = cpu_to_be32(table->SclkStepSize); table 3682 drivers/gpu/drm/radeon/ci_dpm.c table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh); table 3683 drivers/gpu/drm/radeon/ci_dpm.c table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow); table 3684 drivers/gpu/drm/radeon/ci_dpm.c table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta); table 3685 drivers/gpu/drm/radeon/ci_dpm.c table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime); table 3686 drivers/gpu/drm/radeon/ci_dpm.c table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime); table 3687 drivers/gpu/drm/radeon/ci_dpm.c table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE); table 3688 drivers/gpu/drm/radeon/ci_dpm.c table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE); table 3689 drivers/gpu/drm/radeon/ci_dpm.c table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE); table 3694 drivers/gpu/drm/radeon/ci_dpm.c (u8 *)&table->SystemFlags, table 4103 drivers/gpu/drm/radeon/ci_dpm.c struct radeon_vce_clock_voltage_dependency_table *table = table 4106 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->count; i++) { table 4107 drivers/gpu/drm/radeon/ci_dpm.c if (table->entries[i].evclk >= 
min_evclk) table 4111 drivers/gpu/drm/radeon/ci_dpm.c return table->count - 1; table 4337 drivers/gpu/drm/radeon/ci_dpm.c struct ci_mc_reg_table *table) table 4343 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0, j = table->last; i < table->last; i++) { table 4346 drivers/gpu/drm/radeon/ci_dpm.c switch(table->mc_reg_address[i].s1 << 2) { table 4349 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; table 4350 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; table 4351 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4352 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 4353 drivers/gpu/drm/radeon/ci_dpm.c ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); table 4360 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; table 4361 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; table 4362 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4363 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 4364 drivers/gpu/drm/radeon/ci_dpm.c (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 4366 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[j] |= 0x100; table 4373 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2; table 4374 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2; table 4375 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4376 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 4377 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; table 4386 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; table 4387 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; table 4388 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4389 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 4390 drivers/gpu/drm/radeon/ci_dpm.c (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 4402 drivers/gpu/drm/radeon/ci_dpm.c table->last = j; table 4480 drivers/gpu/drm/radeon/ci_dpm.c static void ci_set_valid_flag(struct ci_mc_reg_table *table) table 4484 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->last; i++) { table 4485 drivers/gpu/drm/radeon/ci_dpm.c for (j = 1; j < table->num_entries; j++) { table 4486 drivers/gpu/drm/radeon/ci_dpm.c if (table->mc_reg_table_entry[j-1].mc_data[i] != table 4487 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[j].mc_data[i]) { table 4488 drivers/gpu/drm/radeon/ci_dpm.c table->valid_flag |= 1 << i; table 4495 drivers/gpu/drm/radeon/ci_dpm.c static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) table 4500 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->last; i++) { table 4501 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_address[i].s0 = table 4502 drivers/gpu/drm/radeon/ci_dpm.c ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 
table 4503 drivers/gpu/drm/radeon/ci_dpm.c address : table->mc_reg_address[i].s1; table 4507 drivers/gpu/drm/radeon/ci_dpm.c static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table, table 4512 drivers/gpu/drm/radeon/ci_dpm.c if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) table 4514 drivers/gpu/drm/radeon/ci_dpm.c if (table->num_entries > MAX_AC_TIMING_ENTRIES) table 4517 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->last; i++) table 4518 drivers/gpu/drm/radeon/ci_dpm.c ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; table 4520 drivers/gpu/drm/radeon/ci_dpm.c ci_table->last = table->last; table 4522 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->num_entries; i++) { table 4524 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[i].mclk_max; table 4525 drivers/gpu/drm/radeon/ci_dpm.c for (j = 0; j < table->last; j++) table 4527 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[i].mc_data[j]; table 4529 drivers/gpu/drm/radeon/ci_dpm.c ci_table->num_entries = table->num_entries; table 4535 drivers/gpu/drm/radeon/ci_dpm.c struct ci_mc_reg_table *table) table 4547 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->last; i++) { table 4548 drivers/gpu/drm/radeon/ci_dpm.c if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) table 4550 drivers/gpu/drm/radeon/ci_dpm.c switch(table->mc_reg_address[i].s1 >> 2) { table 4552 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4553 drivers/gpu/drm/radeon/ci_dpm.c if ((table->mc_reg_table_entry[k].mclk_max == 125000) || table 4554 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mclk_max == 137500)) table 4555 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[i] = table 4556 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) | table 4561 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4562 drivers/gpu/drm/radeon/ci_dpm.c if ((table->mc_reg_table_entry[k].mclk_max == 125000) || table 4563 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mclk_max == 137500)) table 4564 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[i] = table 4565 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | table 4570 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4571 drivers/gpu/drm/radeon/ci_dpm.c if ((table->mc_reg_table_entry[k].mclk_max == 125000) || table 4572 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mclk_max == 137500)) table 4573 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[i] = table 4574 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | table 4579 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4580 drivers/gpu/drm/radeon/ci_dpm.c if ((table->mc_reg_table_entry[k].mclk_max == 125000) || table 4581 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mclk_max == 137500)) table 4582 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[i] = 0; table 4586 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4587 drivers/gpu/drm/radeon/ci_dpm.c if (table->mc_reg_table_entry[k].mclk_max == 125000) table 4588 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[i] = table 4589 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | table 4591 
drivers/gpu/drm/radeon/ci_dpm.c else if (table->mc_reg_table_entry[k].mclk_max == 137500) table 4592 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[i] = table 4593 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | table 4598 drivers/gpu/drm/radeon/ci_dpm.c for (k = 0; k < table->num_entries; k++) { table 4599 drivers/gpu/drm/radeon/ci_dpm.c if (table->mc_reg_table_entry[k].mclk_max == 125000) table 4600 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[i] = table 4601 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | table 4603 drivers/gpu/drm/radeon/ci_dpm.c else if (table->mc_reg_table_entry[k].mclk_max == 137500) table 4604 drivers/gpu/drm/radeon/ci_dpm.c table->mc_reg_table_entry[k].mc_data[i] = table 4605 drivers/gpu/drm/radeon/ci_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | table 4627 drivers/gpu/drm/radeon/ci_dpm.c struct atom_mc_reg_table *table; table 4632 drivers/gpu/drm/radeon/ci_dpm.c table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); table 4633 drivers/gpu/drm/radeon/ci_dpm.c if (!table) table 4657 drivers/gpu/drm/radeon/ci_dpm.c ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); table 4661 drivers/gpu/drm/radeon/ci_dpm.c ret = ci_copy_vbios_mc_reg_table(table, ci_table); table 4678 drivers/gpu/drm/radeon/ci_dpm.c kfree(table); table 4989 drivers/gpu/drm/radeon/ci_dpm.c struct radeon_clock_voltage_dependency_table *table) table 4993 drivers/gpu/drm/radeon/ci_dpm.c if (table) { table 4994 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->count; i++) table 4995 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); table 5000 drivers/gpu/drm/radeon/ci_dpm.c struct radeon_clock_voltage_dependency_table *table) table 5004 drivers/gpu/drm/radeon/ci_dpm.c if (table) { table 5005 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->count; i++) table 5006 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddci_leakage(rdev, &table->entries[i].v); table 5011 drivers/gpu/drm/radeon/ci_dpm.c struct radeon_vce_clock_voltage_dependency_table *table) table 5015 drivers/gpu/drm/radeon/ci_dpm.c if (table) { table 5016 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->count; i++) table 5017 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); table 5022 drivers/gpu/drm/radeon/ci_dpm.c struct radeon_uvd_clock_voltage_dependency_table *table) table 5026 drivers/gpu/drm/radeon/ci_dpm.c if (table) { table 5027 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->count; i++) table 5028 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); table 5033 drivers/gpu/drm/radeon/ci_dpm.c struct radeon_phase_shedding_limits_table *table) table 5037 drivers/gpu/drm/radeon/ci_dpm.c if (table) { table 5038 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->count; i++) table 5039 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage); table 5044 drivers/gpu/drm/radeon/ci_dpm.c struct radeon_clock_and_voltage_limits *table) table 5046 drivers/gpu/drm/radeon/ci_dpm.c if (table) { table 5047 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc); table 5048 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci); table 5053 drivers/gpu/drm/radeon/ci_dpm.c struct radeon_cac_leakage_table *table) table 5057 drivers/gpu/drm/radeon/ci_dpm.c if 
(table) { table 5058 drivers/gpu/drm/radeon/ci_dpm.c for (i = 0; i < table->count; i++) table 5059 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc); table 405 drivers/gpu/drm/radeon/cypress_dpm.c struct atom_voltage_table *table, table 410 drivers/gpu/drm/radeon/cypress_dpm.c for (i = 0; i < table->count; i++) { table 411 drivers/gpu/drm/radeon/cypress_dpm.c if (value <= table->entries[i].value) { table 413 drivers/gpu/drm/radeon/cypress_dpm.c voltage->value = cpu_to_be16(table->entries[i].value); table 418 drivers/gpu/drm/radeon/cypress_dpm.c if (i == table->count) table 1237 drivers/gpu/drm/radeon/cypress_dpm.c RV770_SMC_STATETABLE *table) table 1244 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = table 1246 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = table 1248 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = table 1250 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = table 1252 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = table 1254 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mclk.mclk770.vDLL_CNTL = table 1257 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_SS = table 1259 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 = table 1262 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mclk.mclk770.mclk_value = table 1265 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = table 1267 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = table 1269 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = table 1271 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = table 1273 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = table 1276 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].sclk.sclk_value = table 1279 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0; table 1281 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].ACIndex = 0; table 1286 drivers/gpu/drm/radeon/cypress_dpm.c &table->initialState.levels[0].vddc); table 1292 drivers/gpu/drm/radeon/cypress_dpm.c &table->initialState.levels[0].vddci); table 1295 drivers/gpu/drm/radeon/cypress_dpm.c &table->initialState.levels[0].mvdd); table 1298 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].aT = cpu_to_be32(a_t); table 1300 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); table 1304 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].gen2PCIE = 1; table 1306 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].gen2PCIE = 0; table 1308 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].gen2XSP = 1; table 1310 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].gen2XSP = 0; table 1313 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].strobeMode = table 1318 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG; 
table 1320 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[0].mcFlags = 0; table 1323 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[1] = table->initialState.levels[0]; table 1324 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.levels[2] = table->initialState.levels[0]; table 1326 drivers/gpu/drm/radeon/cypress_dpm.c table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; table 1332 drivers/gpu/drm/radeon/cypress_dpm.c RV770_SMC_STATETABLE *table) table 1355 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState = table->initialState; table 1357 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; table 1363 drivers/gpu/drm/radeon/cypress_dpm.c &table->ACPIState.levels[0].vddc); table 1366 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].gen2PCIE = 1; table 1368 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 1370 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 1372 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].gen2XSP = 1; table 1374 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].gen2XSP = 0; table 1379 drivers/gpu/drm/radeon/cypress_dpm.c &table->ACPIState.levels[0].vddc); table 1380 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 1388 drivers/gpu/drm/radeon/cypress_dpm.c &table->ACPIState.levels[0].vddci); table 1434 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = table 1436 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = table 1438 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = table 1440 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = table 1442 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = table 1444 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl); table 1446 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0; table 1448 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = table 1450 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = table 1452 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = table 1455 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].sclk.sclk_value = 0; table 1457 drivers/gpu/drm/radeon/cypress_dpm.c cypress_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd); table 1460 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[0].ACIndex = 1; table 1462 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[1] = table->ACPIState.levels[0]; table 1463 drivers/gpu/drm/radeon/cypress_dpm.c table->ACPIState.levels[2] = table->ACPIState.levels[0]; table 1514 drivers/gpu/drm/radeon/cypress_dpm.c RV770_SMC_STATETABLE *table) table 1519 drivers/gpu/drm/radeon/cypress_dpm.c table->highSMIO[i] = 0; table 1520 drivers/gpu/drm/radeon/cypress_dpm.c table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); table 1525 drivers/gpu/drm/radeon/cypress_dpm.c RV770_SMC_STATETABLE *table) table 1534 drivers/gpu/drm/radeon/cypress_dpm.c table); table 1536 drivers/gpu/drm/radeon/cypress_dpm.c 
table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0; table 1537 drivers/gpu/drm/radeon/cypress_dpm.c table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] = table 1543 drivers/gpu/drm/radeon/cypress_dpm.c table->maxVDDCIndexInPPTable = i; table 1552 drivers/gpu/drm/radeon/cypress_dpm.c table); table 1554 drivers/gpu/drm/radeon/cypress_dpm.c table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0; table 1555 drivers/gpu/drm/radeon/cypress_dpm.c table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] = table 1616 drivers/gpu/drm/radeon/cypress_dpm.c RV770_SMC_STATETABLE *table = &pi->smc_statetable; table 1619 drivers/gpu/drm/radeon/cypress_dpm.c memset(table, 0, sizeof(RV770_SMC_STATETABLE)); table 1621 drivers/gpu/drm/radeon/cypress_dpm.c cypress_populate_smc_voltage_tables(rdev, table); table 1626 drivers/gpu/drm/radeon/cypress_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; table 1629 drivers/gpu/drm/radeon/cypress_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; table 1632 drivers/gpu/drm/radeon/cypress_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; table 1637 drivers/gpu/drm/radeon/cypress_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 1640 drivers/gpu/drm/radeon/cypress_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; table 1643 drivers/gpu/drm/radeon/cypress_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 1646 drivers/gpu/drm/radeon/cypress_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 1648 drivers/gpu/drm/radeon/cypress_dpm.c ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table); table 1652 drivers/gpu/drm/radeon/cypress_dpm.c ret = cypress_populate_smc_acpi_state(rdev, table); table 1656 drivers/gpu/drm/radeon/cypress_dpm.c table->driverState = table->initialState; table 1660 drivers/gpu/drm/radeon/cypress_dpm.c (u8 *)table, sizeof(RV770_SMC_STATETABLE), table 118 drivers/gpu/drm/radeon/cypress_dpm.h RV770_SMC_STATETABLE *table); table 120 drivers/gpu/drm/radeon/cypress_dpm.h RV770_SMC_STATETABLE *table); table 123 drivers/gpu/drm/radeon/cypress_dpm.h RV770_SMC_STATETABLE *table); table 720 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 723 drivers/gpu/drm/radeon/kv_dpm.c if (table && table->count) { table 725 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].clk == pi->boot_pl.sclk) table 732 drivers/gpu/drm/radeon/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 735 drivers/gpu/drm/radeon/kv_dpm.c if (table->num_max_dpm_entries == 0) table 739 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) table 822 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_uvd_clock_voltage_dependency_table *table = table 828 drivers/gpu/drm/radeon/kv_dpm.c if (table == NULL || table->count == 0) table 832 drivers/gpu/drm/radeon/kv_dpm.c for (i = 0; i < table->count; i++) { table 834 drivers/gpu/drm/radeon/kv_dpm.c (pi->high_voltage_t < table->entries[i].v)) table 837 drivers/gpu/drm/radeon/kv_dpm.c pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); table 838 drivers/gpu/drm/radeon/kv_dpm.c pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); table 839 drivers/gpu/drm/radeon/kv_dpm.c pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); table 842 drivers/gpu/drm/radeon/kv_dpm.c (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk); table 844 drivers/gpu/drm/radeon/kv_dpm.c 
(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk); table 847 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].vclk, false, &dividers); table 853 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].dclk, false, &dividers); table 895 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_vce_clock_voltage_dependency_table *table = table 899 drivers/gpu/drm/radeon/kv_dpm.c if (table == NULL || table->count == 0) table 903 drivers/gpu/drm/radeon/kv_dpm.c for (i = 0; i < table->count; i++) { table 905 drivers/gpu/drm/radeon/kv_dpm.c pi->high_voltage_t < table->entries[i].v) table 908 drivers/gpu/drm/radeon/kv_dpm.c pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); table 909 drivers/gpu/drm/radeon/kv_dpm.c pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); table 912 drivers/gpu/drm/radeon/kv_dpm.c (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk); table 915 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].evclk, false, &dividers); table 956 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 962 drivers/gpu/drm/radeon/kv_dpm.c if (table == NULL || table->count == 0) table 966 drivers/gpu/drm/radeon/kv_dpm.c for (i = 0; i < table->count; i++) { table 968 drivers/gpu/drm/radeon/kv_dpm.c pi->high_voltage_t < table->entries[i].v) table 971 drivers/gpu/drm/radeon/kv_dpm.c pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); table 972 drivers/gpu/drm/radeon/kv_dpm.c pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); table 975 drivers/gpu/drm/radeon/kv_dpm.c (u8)kv_get_clk_bypass(rdev, table->entries[i].clk); table 978 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].clk, false, &dividers); table 1022 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 1028 drivers/gpu/drm/radeon/kv_dpm.c if (table == NULL || table->count == 0) table 1032 drivers/gpu/drm/radeon/kv_dpm.c for (i = 0; i < table->count; i++) { table 1033 drivers/gpu/drm/radeon/kv_dpm.c pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); table 1034 drivers/gpu/drm/radeon/kv_dpm.c pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); table 1037 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].clk, false, &dividers); table 1081 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 1084 drivers/gpu/drm/radeon/kv_dpm.c if (table && table->count) { table 1087 drivers/gpu/drm/radeon/kv_dpm.c if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) table 1089 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) table 1091 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) table 1093 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) table 1095 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) table 1104 drivers/gpu/drm/radeon/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 1108 drivers/gpu/drm/radeon/kv_dpm.c if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) table 1110 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) table 1112 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) table 1114 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) table 1116
drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) table 1428 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_uvd_clock_voltage_dependency_table *table = table 1434 drivers/gpu/drm/radeon/kv_dpm.c if (table->count) table 1435 drivers/gpu/drm/radeon/kv_dpm.c pi->uvd_boot_level = table->count - 1; table 1464 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_vce_clock_voltage_dependency_table *table = table 1467 drivers/gpu/drm/radeon/kv_dpm.c for (i = 0; i < table->count; i++) { table 1468 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].evclk >= evclk) table 1480 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_vce_clock_voltage_dependency_table *table = table 1489 drivers/gpu/drm/radeon/kv_dpm.c pi->vce_boot_level = table->count - 1; table 1521 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 1527 drivers/gpu/drm/radeon/kv_dpm.c pi->samu_boot_level = table->count - 1; table 1552 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 1555 drivers/gpu/drm/radeon/kv_dpm.c for (i = 0; i < table->count; i++) { table 1556 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].clk >= 0) /* XXX */ table 1560 drivers/gpu/drm/radeon/kv_dpm.c if (i >= table->count) table 1561 drivers/gpu/drm/radeon/kv_dpm.c i = table->count - 1; table 1585 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 1591 drivers/gpu/drm/radeon/kv_dpm.c pi->acp_boot_level = table->count - 1; table 1713 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 1716 drivers/gpu/drm/radeon/kv_dpm.c if (table && table->count) { table 1718 drivers/gpu/drm/radeon/kv_dpm.c if ((table->entries[i].clk >= new_ps->levels[0].sclk) || table 1726 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) table 1732 drivers/gpu/drm/radeon/kv_dpm.c if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > table 1733 drivers/gpu/drm/radeon/kv_dpm.c (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) table 1739 drivers/gpu/drm/radeon/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 1743 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || table 1751 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].sclk_frequency <= table 1759 drivers/gpu/drm/radeon/kv_dpm.c table->entries[pi->highest_valid].sclk_frequency) > table 1760 drivers/gpu/drm/radeon/kv_dpm.c (table->entries[pi->lowest_valid].sclk_frequency - table 1969 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_and_voltage_limits *table) table 1975 drivers/gpu/drm/radeon/kv_dpm.c table->sclk = table 1977 drivers/gpu/drm/radeon/kv_dpm.c table->vddc = table 1982 drivers/gpu/drm/radeon/kv_dpm.c table->mclk = pi->sys_info.nbp_memory_clock[0]; table 2108 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 2112 drivers/gpu/drm/radeon/kv_dpm.c if (table && table->count) { table 2113 drivers/gpu/drm/radeon/kv_dpm.c for (i = table->count - 1; i >= 0; i--) { table 2115 drivers/gpu/drm/radeon/kv_dpm.c (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <= table 2122 drivers/gpu/drm/radeon/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 2125 drivers/gpu/drm/radeon/kv_dpm.c for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { table 2127 drivers/gpu/drm/radeon/kv_dpm.c 
(kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <= table 2149 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 2169 drivers/gpu/drm/radeon/kv_dpm.c for (i = table->count - 1; i >= 0; i--) { table 2170 drivers/gpu/drm/radeon/kv_dpm.c if (stable_p_state_sclk >= table->entries[i].clk) { table 2171 drivers/gpu/drm/radeon/kv_dpm.c stable_p_state_sclk = table->entries[i].clk; table 2177 drivers/gpu/drm/radeon/kv_dpm.c stable_p_state_sclk = table->entries[0].clk; table 2194 drivers/gpu/drm/radeon/kv_dpm.c if (table && table->count) { table 2200 drivers/gpu/drm/radeon/kv_dpm.c ps->levels[i].sclk = table->entries[limit].clk; table 2204 drivers/gpu/drm/radeon/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 2212 drivers/gpu/drm/radeon/kv_dpm.c ps->levels[i].sclk = table->entries[limit].sclk_frequency; table 2353 drivers/gpu/drm/radeon/kv_dpm.c struct radeon_clock_voltage_dependency_table *table = table 2356 drivers/gpu/drm/radeon/kv_dpm.c if (table && table->count) { table 2360 drivers/gpu/drm/radeon/kv_dpm.c for (i = 0; i < table->count; i++) { table 2363 drivers/gpu/drm/radeon/kv_dpm.c kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v))) table 2366 drivers/gpu/drm/radeon/kv_dpm.c kv_set_divider_value(rdev, i, table->entries[i].clk); table 2369 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].v); table 2376 drivers/gpu/drm/radeon/kv_dpm.c struct sumo_sclk_voltage_mapping_table *table = table 2380 drivers/gpu/drm/radeon/kv_dpm.c for (i = 0; i < table->num_max_dpm_entries; i++) { table 2383 drivers/gpu/drm/radeon/kv_dpm.c kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit)) table 2386 drivers/gpu/drm/radeon/kv_dpm.c kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency); table 2387 drivers/gpu/drm/radeon/kv_dpm.c kv_set_vid(rdev, i, table->entries[i].vid_2bit); table 115 drivers/gpu/drm/radeon/mkregtable.c unsigned *table; table 131 drivers/gpu/drm/radeon/mkregtable.c static void table_offset_add(struct table *t, struct offset *offset) table 136 drivers/gpu/drm/radeon/mkregtable.c static void table_init(struct table *t) table 141 drivers/gpu/drm/radeon/mkregtable.c t->table = NULL; table 144 drivers/gpu/drm/radeon/mkregtable.c static void table_print(struct table *t) table 162 drivers/gpu/drm/radeon/mkregtable.c printf("0x%08X,", t->table[id++]); table 169 drivers/gpu/drm/radeon/mkregtable.c static int table_build(struct table *t) table 175 drivers/gpu/drm/radeon/mkregtable.c t->table = (unsigned *)malloc(sizeof(unsigned) * t->nentry); table 176 drivers/gpu/drm/radeon/mkregtable.c if (t->table == NULL) table 178 drivers/gpu/drm/radeon/mkregtable.c memset(t->table, 0xff, sizeof(unsigned) * t->nentry); table 183 drivers/gpu/drm/radeon/mkregtable.c t->table[i] ^= m; table 189 drivers/gpu/drm/radeon/mkregtable.c static int parser_auth(struct table *t, const char *filename) table 267 drivers/gpu/drm/radeon/mkregtable.c struct table t; table 992 drivers/gpu/drm/radeon/ni_dpm.c struct radeon_clock_voltage_dependency_table *table) table 997 drivers/gpu/drm/radeon/ni_dpm.c if (table) { table 998 drivers/gpu/drm/radeon/ni_dpm.c for (i = 0; i < table->count; i++) { table 999 drivers/gpu/drm/radeon/ni_dpm.c if (0xff01 == table->entries[i].v) { table 1002 drivers/gpu/drm/radeon/ni_dpm.c table->entries[i].v = pi->max_vddc; table 1261 drivers/gpu/drm/radeon/ni_dpm.c NISLANDS_SMC_STATETABLE *table) table 1266 drivers/gpu/drm/radeon/ni_dpm.c table->highSMIO[i] = 0; table 1267 drivers/gpu/drm/radeon/ni_dpm.c 
table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); table 1272 drivers/gpu/drm/radeon/ni_dpm.c NISLANDS_SMC_STATETABLE *table) table 1279 drivers/gpu/drm/radeon/ni_dpm.c ni_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table); table 1280 drivers/gpu/drm/radeon/ni_dpm.c table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = 0; table 1281 drivers/gpu/drm/radeon/ni_dpm.c table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = table 1286 drivers/gpu/drm/radeon/ni_dpm.c table->maxVDDCIndexInPPTable = i; table 1293 drivers/gpu/drm/radeon/ni_dpm.c ni_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table); table 1295 drivers/gpu/drm/radeon/ni_dpm.c table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0; table 1296 drivers/gpu/drm/radeon/ni_dpm.c table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = table 1302 drivers/gpu/drm/radeon/ni_dpm.c struct atom_voltage_table *table, table 1308 drivers/gpu/drm/radeon/ni_dpm.c for (i = 0; i < table->count; i++) { table 1309 drivers/gpu/drm/radeon/ni_dpm.c if (value <= table->entries[i].value) { table 1311 drivers/gpu/drm/radeon/ni_dpm.c voltage->value = cpu_to_be16(table->entries[i].value); table 1316 drivers/gpu/drm/radeon/ni_dpm.c if (i >= table->count) table 1683 drivers/gpu/drm/radeon/ni_dpm.c NISLANDS_SMC_STATETABLE *table) table 1692 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = table 1694 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = table 1696 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = table 1698 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = table 1700 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL = table 1702 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mclk.vDLL_CNTL = table 1704 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mclk.vMPLL_SS = table 1706 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mclk.vMPLL_SS2 = table 1708 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mclk.mclk_value = table 1711 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = table 1713 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = table 1715 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = table 1717 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = table 1719 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = table 1721 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = table 1723 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].sclk.sclk_value = table 1725 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].arbRefreshState = table 1728 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].ACIndex = 0; table 1732 drivers/gpu/drm/radeon/ni_dpm.c &table->initialState.levels[0].vddc); table 1737 drivers/gpu/drm/radeon/ni_dpm.c &table->initialState.levels[0].vddc, table 1741 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].vddc.index, table 1742 drivers/gpu/drm/radeon/ni_dpm.c &table->initialState.levels[0].std_vddc); table 1749 drivers/gpu/drm/radeon/ni_dpm.c &table->initialState.levels[0].vddci); table 1751 
drivers/gpu/drm/radeon/ni_dpm.c ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd); table 1754 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].aT = cpu_to_be32(reg); table 1756 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); table 1759 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].gen2PCIE = 1; table 1761 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].gen2PCIE = 0; table 1764 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].strobeMode = table 1769 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG; table 1771 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].mcFlags = 0; table 1774 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levelCount = 1; table 1776 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; table 1778 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].dpm2.MaxPS = 0; table 1779 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].dpm2.NearTDPDec = 0; table 1780 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].dpm2.AboveSafeInc = 0; table 1781 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].dpm2.BelowSafeInc = 0; table 1784 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg); table 1787 drivers/gpu/drm/radeon/ni_dpm.c table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); table 1793 drivers/gpu/drm/radeon/ni_dpm.c NISLANDS_SMC_STATETABLE *table) table 1811 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState = table->initialState; table 1813 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; table 1818 drivers/gpu/drm/radeon/ni_dpm.c pi->acpi_vddc, &table->ACPIState.levels[0].vddc); table 1823 drivers/gpu/drm/radeon/ni_dpm.c &table->ACPIState.levels[0].vddc, &std_vddc); table 1826 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].vddc.index, table 1827 drivers/gpu/drm/radeon/ni_dpm.c &table->ACPIState.levels[0].std_vddc); table 1832 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].gen2PCIE = 1; table 1834 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 1836 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 1842 drivers/gpu/drm/radeon/ni_dpm.c &table->ACPIState.levels[0].vddc); table 1847 drivers/gpu/drm/radeon/ni_dpm.c &table->ACPIState.levels[0].vddc, table 1851 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].vddc.index, table 1852 drivers/gpu/drm/radeon/ni_dpm.c &table->ACPIState.levels[0].std_vddc); table 1854 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 1862 drivers/gpu/drm/radeon/ni_dpm.c &table->ACPIState.levels[0].vddci); table 1905 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); table 1906 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2); table 1907 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); table 1908 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2); table 1909 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); table 1910 
drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl); table 1912 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].mclk.mclk_value = 0; table 1914 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); table 1915 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); table 1916 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3); table 1917 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4); table 1919 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].sclk.sclk_value = 0; table 1921 drivers/gpu/drm/radeon/ni_dpm.c ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd); table 1924 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].ACIndex = 1; table 1926 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].dpm2.MaxPS = 0; table 1927 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].dpm2.NearTDPDec = 0; table 1928 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].dpm2.AboveSafeInc = 0; table 1929 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].dpm2.BelowSafeInc = 0; table 1932 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg); table 1935 drivers/gpu/drm/radeon/ni_dpm.c table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); table 1946 drivers/gpu/drm/radeon/ni_dpm.c NISLANDS_SMC_STATETABLE *table = &ni_pi->smc_statetable; table 1948 drivers/gpu/drm/radeon/ni_dpm.c memset(table, 0, sizeof(NISLANDS_SMC_STATETABLE)); table 1950 drivers/gpu/drm/radeon/ni_dpm.c ni_populate_smc_voltage_tables(rdev, table); table 1955 drivers/gpu/drm/radeon/ni_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; table 1958 drivers/gpu/drm/radeon/ni_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; table 1961 drivers/gpu/drm/radeon/ni_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; table 1966 drivers/gpu/drm/radeon/ni_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 1969 drivers/gpu/drm/radeon/ni_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; table 1972 drivers/gpu/drm/radeon/ni_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 1975 drivers/gpu/drm/radeon/ni_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 1977 drivers/gpu/drm/radeon/ni_dpm.c ret = ni_populate_smc_initial_state(rdev, radeon_boot_state, table); table 1981 drivers/gpu/drm/radeon/ni_dpm.c ret = ni_populate_smc_acpi_state(rdev, table); table 1985 drivers/gpu/drm/radeon/ni_dpm.c table->driverState = table->initialState; table 1987 drivers/gpu/drm/radeon/ni_dpm.c table->ULVState = table->initialState; table 1994 drivers/gpu/drm/radeon/ni_dpm.c return rv770_copy_bytes_to_smc(rdev, pi->state_table_start, (u8 *)table, table 2710 drivers/gpu/drm/radeon/ni_dpm.c struct ni_mc_reg_table *table) table 2716 drivers/gpu/drm/radeon/ni_dpm.c for (i = 0, j = table->last; i < table->last; i++) { table 2717 drivers/gpu/drm/radeon/ni_dpm.c switch (table->mc_reg_address[i].s1) { table 2722 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; table 2723 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; table 2724 drivers/gpu/drm/radeon/ni_dpm.c for (k = 0; k < table->num_entries; k++) 
table 2725 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 2727 drivers/gpu/drm/radeon/ni_dpm.c ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); table 2733 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; table 2734 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; table 2735 drivers/gpu/drm/radeon/ni_dpm.c for(k = 0; k < table->num_entries; k++) { table 2736 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 2738 drivers/gpu/drm/radeon/ni_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 2740 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_table_entry[k].mc_data[j] |= 0x100; table 2748 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; table 2749 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; table 2750 drivers/gpu/drm/radeon/ni_dpm.c for (k = 0; k < table->num_entries; k++) table 2751 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 2753 drivers/gpu/drm/radeon/ni_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 2763 drivers/gpu/drm/radeon/ni_dpm.c table->last = j; table 2820 drivers/gpu/drm/radeon/ni_dpm.c static void ni_set_valid_flag(struct ni_mc_reg_table *table) table 2824 drivers/gpu/drm/radeon/ni_dpm.c for (i = 0; i < table->last; i++) { table 2825 drivers/gpu/drm/radeon/ni_dpm.c for (j = 1; j < table->num_entries; j++) { table 2826 drivers/gpu/drm/radeon/ni_dpm.c if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) { table 2827 drivers/gpu/drm/radeon/ni_dpm.c table->valid_flag |= 1 << i; table 2834 drivers/gpu/drm/radeon/ni_dpm.c static void ni_set_s0_mc_reg_index(struct ni_mc_reg_table *table) table 2839 drivers/gpu/drm/radeon/ni_dpm.c for (i = 0; i < table->last; i++) table 2840 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_address[i].s0 = table 2841 drivers/gpu/drm/radeon/ni_dpm.c ni_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 
table 2842 drivers/gpu/drm/radeon/ni_dpm.c address : table->mc_reg_address[i].s1; table 2845 drivers/gpu/drm/radeon/ni_dpm.c static int ni_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, table 2850 drivers/gpu/drm/radeon/ni_dpm.c if (table->last > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) table 2852 drivers/gpu/drm/radeon/ni_dpm.c if (table->num_entries > MAX_AC_TIMING_ENTRIES) table 2855 drivers/gpu/drm/radeon/ni_dpm.c for (i = 0; i < table->last; i++) table 2856 drivers/gpu/drm/radeon/ni_dpm.c ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; table 2857 drivers/gpu/drm/radeon/ni_dpm.c ni_table->last = table->last; table 2859 drivers/gpu/drm/radeon/ni_dpm.c for (i = 0; i < table->num_entries; i++) { table 2861 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_table_entry[i].mclk_max; table 2862 drivers/gpu/drm/radeon/ni_dpm.c for (j = 0; j < table->last; j++) table 2864 drivers/gpu/drm/radeon/ni_dpm.c table->mc_reg_table_entry[i].mc_data[j]; table 2866 drivers/gpu/drm/radeon/ni_dpm.c ni_table->num_entries = table->num_entries; table 2875 drivers/gpu/drm/radeon/ni_dpm.c struct atom_mc_reg_table *table; table 2879 drivers/gpu/drm/radeon/ni_dpm.c table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); table 2880 drivers/gpu/drm/radeon/ni_dpm.c if (!table) table 2897 drivers/gpu/drm/radeon/ni_dpm.c ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); table 2902 drivers/gpu/drm/radeon/ni_dpm.c ret = ni_copy_vbios_mc_reg_table(table, ni_table); table 2917 drivers/gpu/drm/radeon/ni_dpm.c kfree(table); table 133 drivers/gpu/drm/radeon/radeon_combios.c enum radeon_combios_table_offset table) table 142 drivers/gpu/drm/radeon/radeon_combios.c switch (table) { table 366 drivers/gpu/drm/radeon/radeon_combios.c if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size) table 3328 drivers/gpu/drm/radeon/radeon_combios.c uint16_t table; table 3335 drivers/gpu/drm/radeon/radeon_combios.c table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE); table 3336 drivers/gpu/drm/radeon/radeon_combios.c if (table) table 3337 drivers/gpu/drm/radeon/radeon_combios.c combios_parse_mmio_table(dev, table); table 3340 drivers/gpu/drm/radeon/radeon_combios.c table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE); table 3341 drivers/gpu/drm/radeon/radeon_combios.c if (table) table 3342 drivers/gpu/drm/radeon/radeon_combios.c combios_parse_pll_table(dev, table); table 3345 drivers/gpu/drm/radeon/radeon_combios.c table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE); table 3346 drivers/gpu/drm/radeon/radeon_combios.c if (table) table 3347 drivers/gpu/drm/radeon/radeon_combios.c combios_parse_mmio_table(dev, table); table 3351 drivers/gpu/drm/radeon/radeon_combios.c table = table 3353 drivers/gpu/drm/radeon/radeon_combios.c if (table) table 3354 drivers/gpu/drm/radeon/radeon_combios.c combios_parse_mmio_table(dev, table); table 3357 drivers/gpu/drm/radeon/radeon_combios.c table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE); table 3358 drivers/gpu/drm/radeon/radeon_combios.c if (table) table 3359 drivers/gpu/drm/radeon/radeon_combios.c combios_parse_ram_reset_table(dev, table); table 3362 drivers/gpu/drm/radeon/radeon_combios.c table = table 3364 drivers/gpu/drm/radeon/radeon_combios.c if (table) table 3365 drivers/gpu/drm/radeon/radeon_combios.c combios_parse_mmio_table(dev, table); table 3411 drivers/gpu/drm/radeon/radeon_combios.c table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); table 3412 
drivers/gpu/drm/radeon/radeon_combios.c if (table) table 3413 drivers/gpu/drm/radeon/radeon_combios.c combios_parse_pll_table(dev, table); table 229 drivers/gpu/drm/radeon/rv730_dpm.c RV770_SMC_STATETABLE *table) table 241 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState = table->initialState; table 242 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; table 246 drivers/gpu/drm/radeon/rv730_dpm.c &table->ACPIState.levels[0].vddc); table 247 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].gen2PCIE = pi->pcie_gen2 ? table 249 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].gen2XSP = table 253 drivers/gpu/drm/radeon/rv730_dpm.c &table->ACPIState.levels[0].vddc); table 254 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 296 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl); table 297 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 = cpu_to_be32(mpll_func_cntl_2); table 298 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 = cpu_to_be32(mpll_func_cntl_3); table 299 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); table 300 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].mclk.mclk730.vDLL_CNTL = cpu_to_be32(dll_cntl); table 302 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].mclk.mclk730.mclk_value = 0; table 304 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); table 305 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); table 306 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3); table 308 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[0].sclk.sclk_value = 0; table 310 drivers/gpu/drm/radeon/rv730_dpm.c rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd); table 312 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[1] = table->ACPIState.levels[0]; table 313 drivers/gpu/drm/radeon/rv730_dpm.c table->ACPIState.levels[2] = table->ACPIState.levels[0]; table 320 drivers/gpu/drm/radeon/rv730_dpm.c RV770_SMC_STATETABLE *table) table 326 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL = table 328 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL2 = table 330 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].mclk.mclk730.vMPLL_FUNC_CNTL3 = table 332 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].mclk.mclk730.vMCLK_PWRMGT_CNTL = table 334 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].mclk.mclk730.vDLL_CNTL = table 336 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].mclk.mclk730.vMPLL_SS = table 338 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].mclk.mclk730.vMPLL_SS2 = table 341 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].mclk.mclk730.mclk_value = table 344 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = table 346 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = table 348 drivers/gpu/drm/radeon/rv730_dpm.c 
table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = table 350 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = table 352 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = table 355 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].sclk.sclk_value = table 358 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0; table 360 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].seqValue = table 365 drivers/gpu/drm/radeon/rv730_dpm.c &table->initialState.levels[0].vddc); table 367 drivers/gpu/drm/radeon/rv730_dpm.c &table->initialState.levels[0].mvdd); table 371 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].aT = cpu_to_be32(a_t); table 373 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); table 376 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].gen2PCIE = 1; table 378 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].gen2PCIE = 0; table 380 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].gen2XSP = 1; table 382 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[0].gen2XSP = 0; table 384 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[1] = table->initialState.levels[0]; table 385 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.levels[2] = table->initialState.levels[0]; table 387 drivers/gpu/drm/radeon/rv730_dpm.c table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; table 315 drivers/gpu/drm/radeon/rv740_dpm.c RV770_SMC_STATETABLE *table) table 328 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState = table->initialState; table 330 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; table 334 drivers/gpu/drm/radeon/rv740_dpm.c &table->ACPIState.levels[0].vddc); table 335 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].gen2PCIE = table 338 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].gen2XSP = table 342 drivers/gpu/drm/radeon/rv740_dpm.c &table->ACPIState.levels[0].vddc); table 343 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 373 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); table 374 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2); table 375 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); table 376 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2); table 377 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); table 378 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl); table 380 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0; table 382 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); table 383 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); table 384 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = 
cpu_to_be32(spll_func_cntl_3); table 386 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[0].sclk.sclk_value = 0; table 388 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[1] = table->ACPIState.levels[0]; table 389 drivers/gpu/drm/radeon/rv740_dpm.c table->ACPIState.levels[2] = table->ACPIState.levels[0]; table 391 drivers/gpu/drm/radeon/rv740_dpm.c rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd); table 915 drivers/gpu/drm/radeon/rv770_dpm.c RV770_SMC_STATETABLE *table) table 936 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState = table->initialState; table 938 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; table 942 drivers/gpu/drm/radeon/rv770_dpm.c &table->ACPIState.levels[0].vddc); table 945 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].gen2PCIE = 1; table 947 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 949 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 951 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].gen2XSP = 1; table 953 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].gen2XSP = 0; table 956 drivers/gpu/drm/radeon/rv770_dpm.c &table->ACPIState.levels[0].vddc); table 957 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].gen2PCIE = 0; table 981 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); table 982 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2); table 983 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); table 984 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2); table 986 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); table 987 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl); table 989 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0; table 991 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); table 992 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); table 993 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3); table 995 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[0].sclk.sclk_value = 0; table 997 drivers/gpu/drm/radeon/rv770_dpm.c rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd); table 999 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[1] = table->ACPIState.levels[0]; table 1000 drivers/gpu/drm/radeon/rv770_dpm.c table->ACPIState.levels[2] = table->ACPIState.levels[0]; table 1024 drivers/gpu/drm/radeon/rv770_dpm.c RV770_SMC_STATETABLE *table) table 1030 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = table 1032 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = table 1034 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = table 1036 drivers/gpu/drm/radeon/rv770_dpm.c 
table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = table 1038 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = table 1040 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mclk.mclk770.vDLL_CNTL = table 1043 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_SS = table 1045 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 = table 1048 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mclk.mclk770.mclk_value = table 1051 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = table 1053 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = table 1055 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = table 1057 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = table 1059 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = table 1062 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].sclk.sclk_value = table 1065 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0; table 1067 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].seqValue = table 1072 drivers/gpu/drm/radeon/rv770_dpm.c &table->initialState.levels[0].vddc); table 1074 drivers/gpu/drm/radeon/rv770_dpm.c &table->initialState.levels[0].mvdd); table 1077 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].aT = cpu_to_be32(a_t); table 1079 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); table 1082 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].gen2PCIE = 1; table 1084 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].gen2PCIE = 0; table 1086 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].gen2XSP = 1; table 1088 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].gen2XSP = 0; table 1093 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].strobeMode = table 1096 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].strobeMode = 0; table 1099 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG; table 1101 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[0].mcFlags = 0; table 1105 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[1] = table->initialState.levels[0]; table 1106 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.levels[2] = table->initialState.levels[0]; table 1108 drivers/gpu/drm/radeon/rv770_dpm.c table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; table 1114 drivers/gpu/drm/radeon/rv770_dpm.c RV770_SMC_STATETABLE *table) table 1120 drivers/gpu/drm/radeon/rv770_dpm.c table->highSMIO[pi->vddc_table[i].vddc_index] = table 1122 drivers/gpu/drm/radeon/rv770_dpm.c table->lowSMIO[pi->vddc_table[i].vddc_index] = table 1126 drivers/gpu/drm/radeon/rv770_dpm.c table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0; table 1127 drivers/gpu/drm/radeon/rv770_dpm.c table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] = table 1136 drivers/gpu/drm/radeon/rv770_dpm.c table->maxVDDCIndexInPPTable = table 1143 drivers/gpu/drm/radeon/rv770_dpm.c RV770_SMC_STATETABLE *table) table 1148 drivers/gpu/drm/radeon/rv770_dpm.c table->lowSMIO[MVDD_HIGH_INDEX] |= 
table 1150 drivers/gpu/drm/radeon/rv770_dpm.c table->lowSMIO[MVDD_LOW_INDEX] |= table 1153 drivers/gpu/drm/radeon/rv770_dpm.c table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_MVDD] = 0; table 1154 drivers/gpu/drm/radeon/rv770_dpm.c table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_MVDD] = table 1166 drivers/gpu/drm/radeon/rv770_dpm.c RV770_SMC_STATETABLE *table = &pi->smc_statetable; table 1169 drivers/gpu/drm/radeon/rv770_dpm.c memset(table, 0, sizeof(RV770_SMC_STATETABLE)); table 1173 drivers/gpu/drm/radeon/rv770_dpm.c rv770_populate_smc_vddc_table(rdev, table); table 1174 drivers/gpu/drm/radeon/rv770_dpm.c rv770_populate_smc_mvdd_table(rdev, table); table 1179 drivers/gpu/drm/radeon/rv770_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; table 1182 drivers/gpu/drm/radeon/rv770_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; table 1186 drivers/gpu/drm/radeon/rv770_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; table 1191 drivers/gpu/drm/radeon/rv770_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 1194 drivers/gpu/drm/radeon/rv770_dpm.c table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK; table 1197 drivers/gpu/drm/radeon/rv770_dpm.c table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE; table 1201 drivers/gpu/drm/radeon/rv770_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 1204 drivers/gpu/drm/radeon/rv770_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 1207 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv730_populate_smc_initial_state(rdev, radeon_boot_state, table); table 1209 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv770_populate_smc_initial_state(rdev, radeon_boot_state, table); table 1214 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv740_populate_smc_acpi_state(rdev, table); table 1216 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv730_populate_smc_acpi_state(rdev, table); table 1218 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv770_populate_smc_acpi_state(rdev, table); table 1222 drivers/gpu/drm/radeon/rv770_dpm.c table->driverState = table->initialState; table 1226 drivers/gpu/drm/radeon/rv770_dpm.c (const u8 *)table, table 188 drivers/gpu/drm/radeon/rv770_dpm.h RV770_SMC_STATETABLE *table); table 191 drivers/gpu/drm/radeon/rv770_dpm.h RV770_SMC_STATETABLE *table); table 209 drivers/gpu/drm/radeon/rv770_dpm.h RV770_SMC_STATETABLE *table); table 1748 drivers/gpu/drm/radeon/si_dpm.c const struct atom_voltage_table *table, table 2538 drivers/gpu/drm/radeon/si_dpm.c struct radeon_cac_leakage_table *table = table 2544 drivers/gpu/drm/radeon/si_dpm.c if (table == NULL) table 2550 drivers/gpu/drm/radeon/si_dpm.c for (i = 0; i < table->count; i++) { table 2551 drivers/gpu/drm/radeon/si_dpm.c if (table->entries[i].vddc > *max) table 2552 drivers/gpu/drm/radeon/si_dpm.c *max = table->entries[i].vddc; table 2553 drivers/gpu/drm/radeon/si_dpm.c if (table->entries[i].vddc < *min) table 2554 drivers/gpu/drm/radeon/si_dpm.c *min = table->entries[i].vddc; table 2941 drivers/gpu/drm/radeon/si_dpm.c struct radeon_vce_clock_voltage_dependency_table *table = table 2945 drivers/gpu/drm/radeon/si_dpm.c (table && (table->count == 0))) { table 2950 drivers/gpu/drm/radeon/si_dpm.c for (i = 0; i < table->count; i++) { table 2951 drivers/gpu/drm/radeon/si_dpm.c if ((evclk <= table->entries[i].evclk) && table 2952 drivers/gpu/drm/radeon/si_dpm.c (ecclk <= table->entries[i].ecclk)) { table 2953 drivers/gpu/drm/radeon/si_dpm.c *voltage = table->entries[i].v; table 2961 
drivers/gpu/drm/radeon/si_dpm.c *voltage = table->entries[table->count - 1].v; table 3894 drivers/gpu/drm/radeon/si_dpm.c const struct atom_voltage_table *table, table 3899 drivers/gpu/drm/radeon/si_dpm.c if ((table == NULL) || (limits == NULL)) table 3902 drivers/gpu/drm/radeon/si_dpm.c data = table->mask_low; table 3911 drivers/gpu/drm/radeon/si_dpm.c if (table->count != num_levels) table 4040 drivers/gpu/drm/radeon/si_dpm.c SISLANDS_SMC_STATETABLE *table) table 4045 drivers/gpu/drm/radeon/si_dpm.c table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); table 4049 drivers/gpu/drm/radeon/si_dpm.c SISLANDS_SMC_STATETABLE *table) table 4065 drivers/gpu/drm/radeon/si_dpm.c si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table); table 4066 drivers/gpu/drm/radeon/si_dpm.c table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = table 4071 drivers/gpu/drm/radeon/si_dpm.c table->maxVDDCIndexInPPTable = i; table 4078 drivers/gpu/drm/radeon/si_dpm.c si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table); table 4080 drivers/gpu/drm/radeon/si_dpm.c table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = table 4086 drivers/gpu/drm/radeon/si_dpm.c si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table); table 4088 drivers/gpu/drm/radeon/si_dpm.c table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = table 4095 drivers/gpu/drm/radeon/si_dpm.c si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table); table 4097 drivers/gpu/drm/radeon/si_dpm.c table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] = table 4112 drivers/gpu/drm/radeon/si_dpm.c const struct atom_voltage_table *table, table 4117 drivers/gpu/drm/radeon/si_dpm.c for (i = 0; i < table->count; i++) { table 4118 drivers/gpu/drm/radeon/si_dpm.c if (value <= table->entries[i].value) { table 4120 drivers/gpu/drm/radeon/si_dpm.c voltage->value = cpu_to_be16(table->entries[i].value); table 4125 drivers/gpu/drm/radeon/si_dpm.c if (i >= table->count) table 4368 drivers/gpu/drm/radeon/si_dpm.c SISLANDS_SMC_STATETABLE *table) table 4377 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.vDLL_CNTL = table 4379 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL = table 4381 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = table 4383 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = table 4385 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL = table 4387 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = table 4389 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = table 4391 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.vMPLL_SS = table 4393 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.vMPLL_SS2 = table 4396 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mclk.mclk_value = table 4399 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = table 4401 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = table 4403 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = table 4405 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = table 4407 drivers/gpu/drm/radeon/si_dpm.c 
table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = table 4409 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = table 4412 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].sclk.sclk_value = table 4415 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].arbRefreshState = table 4418 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].ACIndex = 0; table 4422 drivers/gpu/drm/radeon/si_dpm.c &table->initialState.levels[0].vddc); table 4428 drivers/gpu/drm/radeon/si_dpm.c &table->initialState.levels[0].vddc, table 4432 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].vddc.index, table 4433 drivers/gpu/drm/radeon/si_dpm.c &table->initialState.levels[0].std_vddc); table 4440 drivers/gpu/drm/radeon/si_dpm.c &table->initialState.levels[0].vddci); table 4448 drivers/gpu/drm/radeon/si_dpm.c &table->initialState.levels[0].vddc); table 4450 drivers/gpu/drm/radeon/si_dpm.c si_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd); table 4453 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].aT = cpu_to_be32(reg); table 4455 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); table 4457 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen; table 4460 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].strobeMode = table 4465 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG; table 4467 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].mcFlags = 0; table 4470 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levelCount = 1; table 4472 drivers/gpu/drm/radeon/si_dpm.c table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; table 4474 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].dpm2.MaxPS = 0; table 4475 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].dpm2.NearTDPDec = 0; table 4476 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].dpm2.AboveSafeInc = 0; table 4477 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].dpm2.BelowSafeInc = 0; table 4478 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0; table 4481 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg); table 4484 drivers/gpu/drm/radeon/si_dpm.c table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); table 4490 drivers/gpu/drm/radeon/si_dpm.c SISLANDS_SMC_STATETABLE *table) table 4509 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState = table->initialState; table 4511 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; table 4515 drivers/gpu/drm/radeon/si_dpm.c pi->acpi_vddc, &table->ACPIState.levels[0].vddc); table 4520 drivers/gpu/drm/radeon/si_dpm.c &table->ACPIState.levels[0].vddc, &std_vddc); table 4523 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].vddc.index, table 4524 drivers/gpu/drm/radeon/si_dpm.c &table->ACPIState.levels[0].std_vddc); table 4526 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen; table 4534 drivers/gpu/drm/radeon/si_dpm.c &table->ACPIState.levels[0].vddc); table 4538 drivers/gpu/drm/radeon/si_dpm.c pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc); table 4543 drivers/gpu/drm/radeon/si_dpm.c &table->ACPIState.levels[0].vddc, &std_vddc); table 4547 drivers/gpu/drm/radeon/si_dpm.c 
table->ACPIState.levels[0].vddc.index, table 4548 drivers/gpu/drm/radeon/si_dpm.c &table->ACPIState.levels[0].std_vddc); table 4550 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(rdev, table 4561 drivers/gpu/drm/radeon/si_dpm.c &table->ACPIState.levels[0].vddc); table 4568 drivers/gpu/drm/radeon/si_dpm.c &table->ACPIState.levels[0].vddci); table 4579 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.vDLL_CNTL = table 4581 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = table 4583 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = table 4585 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = table 4587 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL = table 4589 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = table 4591 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = table 4593 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_SS = table 4595 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.vMPLL_SS2 = table 4598 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = table 4600 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = table 4602 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = table 4604 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = table 4607 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].mclk.mclk_value = 0; table 4608 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].sclk.sclk_value = 0; table 4610 drivers/gpu/drm/radeon/si_dpm.c si_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd); table 4613 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].ACIndex = 0; table 4615 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].dpm2.MaxPS = 0; table 4616 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].dpm2.NearTDPDec = 0; table 4617 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].dpm2.AboveSafeInc = 0; table 4618 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].dpm2.BelowSafeInc = 0; table 4619 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0; table 4622 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg); table 4625 drivers/gpu/drm/radeon/si_dpm.c table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); table 4700 drivers/gpu/drm/radeon/si_dpm.c SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable; table 4705 drivers/gpu/drm/radeon/si_dpm.c si_populate_smc_voltage_tables(rdev, table); table 4710 drivers/gpu/drm/radeon/si_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; table 4713 drivers/gpu/drm/radeon/si_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; table 4716 drivers/gpu/drm/radeon/si_dpm.c table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; table 4721 drivers/gpu/drm/radeon/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; table 4725 drivers/gpu/drm/radeon/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; table 4729 drivers/gpu/drm/radeon/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; table 4732 drivers/gpu/drm/radeon/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; table 4735 
drivers/gpu/drm/radeon/si_dpm.c table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH; table 4738 drivers/gpu/drm/radeon/si_dpm.c table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO; table 4744 drivers/gpu/drm/radeon/si_dpm.c ret = si_populate_smc_initial_state(rdev, radeon_boot_state, table); table 4748 drivers/gpu/drm/radeon/si_dpm.c ret = si_populate_smc_acpi_state(rdev, table); table 4752 drivers/gpu/drm/radeon/si_dpm.c table->driverState = table->initialState; table 4760 drivers/gpu/drm/radeon/si_dpm.c ret = si_populate_ulv_state(rdev, &table->ULVState); table 4774 drivers/gpu/drm/radeon/si_dpm.c table->ULVState = table->initialState; table 4778 drivers/gpu/drm/radeon/si_dpm.c (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE), table 5357 drivers/gpu/drm/radeon/si_dpm.c struct si_mc_reg_table *table) table 5363 drivers/gpu/drm/radeon/si_dpm.c for (i = 0, j = table->last; i < table->last; i++) { table 5366 drivers/gpu/drm/radeon/si_dpm.c switch (table->mc_reg_address[i].s1 << 2) { table 5369 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; table 5370 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; table 5371 drivers/gpu/drm/radeon/si_dpm.c for (k = 0; k < table->num_entries; k++) table 5372 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 5374 drivers/gpu/drm/radeon/si_dpm.c ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); table 5380 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; table 5381 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; table 5382 drivers/gpu/drm/radeon/si_dpm.c for (k = 0; k < table->num_entries; k++) { table 5383 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 5385 drivers/gpu/drm/radeon/si_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 5387 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] |= 0x100; table 5394 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2; table 5395 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2; table 5396 drivers/gpu/drm/radeon/si_dpm.c for (k = 0; k < table->num_entries; k++) table 5397 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 5398 drivers/gpu/drm/radeon/si_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; table 5406 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; table 5407 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; table 5408 drivers/gpu/drm/radeon/si_dpm.c for(k = 0; k < table->num_entries; k++) table 5409 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_table_entry[k].mc_data[j] = table 5411 drivers/gpu/drm/radeon/si_dpm.c (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); table 5421 drivers/gpu/drm/radeon/si_dpm.c table->last = j; table 5481 drivers/gpu/drm/radeon/si_dpm.c static void si_set_valid_flag(struct si_mc_reg_table *table) table 5485 drivers/gpu/drm/radeon/si_dpm.c for (i = 0; i < table->last; i++) { table 5486 drivers/gpu/drm/radeon/si_dpm.c for (j = 1; j < table->num_entries; j++) { table 5487 drivers/gpu/drm/radeon/si_dpm.c if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) { table 5488 drivers/gpu/drm/radeon/si_dpm.c table->valid_flag |= 1 << i; table 5495 drivers/gpu/drm/radeon/si_dpm.c 
static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table) table 5500 drivers/gpu/drm/radeon/si_dpm.c for (i = 0; i < table->last; i++) table 5501 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? table 5502 drivers/gpu/drm/radeon/si_dpm.c address : table->mc_reg_address[i].s1; table 5506 drivers/gpu/drm/radeon/si_dpm.c static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, table 5511 drivers/gpu/drm/radeon/si_dpm.c if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) table 5513 drivers/gpu/drm/radeon/si_dpm.c if (table->num_entries > MAX_AC_TIMING_ENTRIES) table 5516 drivers/gpu/drm/radeon/si_dpm.c for (i = 0; i < table->last; i++) table 5517 drivers/gpu/drm/radeon/si_dpm.c si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; table 5518 drivers/gpu/drm/radeon/si_dpm.c si_table->last = table->last; table 5520 drivers/gpu/drm/radeon/si_dpm.c for (i = 0; i < table->num_entries; i++) { table 5522 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_table_entry[i].mclk_max; table 5523 drivers/gpu/drm/radeon/si_dpm.c for (j = 0; j < table->last; j++) { table 5525 drivers/gpu/drm/radeon/si_dpm.c table->mc_reg_table_entry[i].mc_data[j]; table 5528 drivers/gpu/drm/radeon/si_dpm.c si_table->num_entries = table->num_entries; table 5536 drivers/gpu/drm/radeon/si_dpm.c struct atom_mc_reg_table *table; table 5541 drivers/gpu/drm/radeon/si_dpm.c table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); table 5542 drivers/gpu/drm/radeon/si_dpm.c if (!table) table 5560 drivers/gpu/drm/radeon/si_dpm.c ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); table 5564 drivers/gpu/drm/radeon/si_dpm.c ret = si_copy_vbios_mc_reg_table(table, si_table); table 5577 drivers/gpu/drm/radeon/si_dpm.c kfree(table); table 5871 drivers/gpu/drm/radeon/si_dpm.c struct radeon_clock_voltage_dependency_table *table) table 5877 drivers/gpu/drm/radeon/si_dpm.c if (table) { table 5878 drivers/gpu/drm/radeon/si_dpm.c for (i = 0; i < table->count; i++) { table 5880 drivers/gpu/drm/radeon/si_dpm.c table->entries[i].v, table 5883 drivers/gpu/drm/radeon/si_dpm.c table->entries[i].v = leakage_voltage; table 5893 drivers/gpu/drm/radeon/si_dpm.c for (j = (table->count - 2); j >= 0; j--) { table 5894 drivers/gpu/drm/radeon/si_dpm.c table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ? 
table 5895 drivers/gpu/drm/radeon/si_dpm.c table->entries[j].v : table->entries[j + 1].v; table 1572 drivers/gpu/drm/radeon/sumo_dpm.c ATOM_CLK_VOLT_CAPABILITY *table) table 1577 drivers/gpu/drm/radeon/sumo_dpm.c if (table[i].ulMaximumSupportedCLK == 0) table 1581 drivers/gpu/drm/radeon/sumo_dpm.c table[i].ulMaximumSupportedCLK; table 1594 drivers/gpu/drm/radeon/sumo_dpm.c ATOM_AVAILABLE_SCLK_LIST *table) table 1601 drivers/gpu/drm/radeon/sumo_dpm.c if (table[i].ulSupportedSCLK > prev_sclk) { table 1603 drivers/gpu/drm/radeon/sumo_dpm.c table[i].ulSupportedSCLK; table 1605 drivers/gpu/drm/radeon/sumo_dpm.c table[i].usVoltageIndex; table 1606 drivers/gpu/drm/radeon/sumo_dpm.c prev_sclk = table[i].ulSupportedSCLK; table 1616 drivers/gpu/drm/radeon/sumo_dpm.c ATOM_AVAILABLE_SCLK_LIST *table) table 1621 drivers/gpu/drm/radeon/sumo_dpm.c if (table[i].ulSupportedSCLK != 0) { table 1622 drivers/gpu/drm/radeon/sumo_dpm.c vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = table 1623 drivers/gpu/drm/radeon/sumo_dpm.c table[i].usVoltageID; table 1624 drivers/gpu/drm/radeon/sumo_dpm.c vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = table 1625 drivers/gpu/drm/radeon/sumo_dpm.c table[i].usVoltageIndex; table 199 drivers/gpu/drm/radeon/sumo_dpm.h ATOM_AVAILABLE_SCLK_LIST *table); table 202 drivers/gpu/drm/radeon/sumo_dpm.h ATOM_AVAILABLE_SCLK_LIST *table); table 1510 drivers/gpu/drm/radeon/trinity_dpm.c struct radeon_vce_clock_voltage_dependency_table *table = table 1514 drivers/gpu/drm/radeon/trinity_dpm.c (table && (table->count == 0))) { table 1519 drivers/gpu/drm/radeon/trinity_dpm.c for (i = 0; i < table->count; i++) { table 1520 drivers/gpu/drm/radeon/trinity_dpm.c if ((evclk <= table->entries[i].evclk) && table 1521 drivers/gpu/drm/radeon/trinity_dpm.c (ecclk <= table->entries[i].ecclk)) { table 1522 drivers/gpu/drm/radeon/trinity_dpm.c *voltage = table->entries[i].v; table 1530 drivers/gpu/drm/radeon/trinity_dpm.c *voltage = table->entries[table->count - 1].v; table 158 drivers/gpu/drm/sun4i/sun8i_csc.c const u32 *table; table 163 drivers/gpu/drm/sun4i/sun8i_csc.c table = yuv2rgb[range][encoding]; table 166 drivers/gpu/drm/sun4i/sun8i_csc.c table = yvu2rgb[range][encoding]; table 174 drivers/gpu/drm/sun4i/sun8i_csc.c regmap_bulk_write(map, base_reg, table, 12); table 182 drivers/gpu/drm/sun4i/sun8i_csc.c const u32 *table; table 187 drivers/gpu/drm/sun4i/sun8i_csc.c table = yuv2rgb_de3[range][encoding]; table 190 drivers/gpu/drm/sun4i/sun8i_csc.c table = yvu2rgb_de3[range][encoding]; table 198 drivers/gpu/drm/sun4i/sun8i_csc.c regmap_bulk_write(map, base_reg, table, 12); table 78 drivers/gpu/drm/tegra/rgb.c const struct reg_entry *table, table 84 drivers/gpu/drm/tegra/rgb.c tegra_dc_writel(dc, table[i].value, table[i].offset); table 65 drivers/gpu/drm/vgem/vgem_drv.c drm_prime_gem_destroy(obj, vgem_obj->table); table 372 drivers/gpu/drm/vgem/vgem_drv.c obj->table = sg; table 380 drivers/gpu/drm/vgem/vgem_drv.c drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL, table 50 drivers/gpu/drm/vgem/vgem_drv.h struct sg_table *table; table 1093 drivers/gpu/drm/via/via_verifier.c setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size) table 1098 drivers/gpu/drm/via/via_verifier.c table[i] = forbidden_command; table 1101 drivers/gpu/drm/via/via_verifier.c table[init_table[i].code] = init_table[i].hz; table 47 drivers/gpu/drm/virtio/virtgpu_prime.c struct sg_table *table) table 166 drivers/hid/hid-apple.c const struct apple_key_translation *table, u16 from) 
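The trinity_dpm.c lines just above, like the earlier si_dpm.c VCE lookup in this listing, walk a clock/voltage dependency table: the first entry whose evclk/ecclk limits cover the requested clocks supplies the voltage, and the last entry is used as a fallback when nothing matches. A minimal standalone sketch of that lookup pattern, with simplified (hypothetical) struct and function names rather than the radeon ones:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins for the dependency-table types (illustrative names). */
	struct clk_volt_entry {
		uint32_t evclk;	/* highest encode clock this entry supports */
		uint32_t ecclk;	/* highest encode-control clock this entry supports */
		uint16_t v;	/* voltage to program for this level */
	};

	struct clk_volt_table {
		unsigned int count;
		const struct clk_volt_entry *entries;
	};

	/*
	 * Pick the voltage for a requested (evclk, ecclk) pair: take the first
	 * entry whose limits cover the request; if none does, clamp to the
	 * highest (last) entry, mirroring the fallback seen in the listing above.
	 */
	static int lookup_voltage(const struct clk_volt_table *table,
				  uint32_t evclk, uint32_t ecclk, uint16_t *voltage)
	{
		unsigned int i;

		if (!table || table->count == 0)
			return -1;

		for (i = 0; i < table->count; i++) {
			if (evclk <= table->entries[i].evclk &&
			    ecclk <= table->entries[i].ecclk) {
				*voltage = table->entries[i].v;
				return 0;
			}
		}

		*voltage = table->entries[table->count - 1].v;
		return 0;
	}

	int main(void)
	{
		static const struct clk_volt_entry entries[] = {
			{ 300000, 300000,  900 },
			{ 600000, 600000, 1000 },
			{ 800000, 800000, 1100 },
		};
		const struct clk_volt_table table = { 3, entries };
		uint16_t v = 0;

		lookup_voltage(&table, 500000, 400000, &v);
		printf("voltage: %u\n", v);	/* prints 1000 */
		return 0;
	}

The fall-through to the last entry matches the clamp-to-highest-level behaviour visible in the entries above; a real driver would additionally rely on the table being sorted by clock.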
table 171 drivers/hid/hid-apple.c for (trans = table; trans->from; trans++) table 182 drivers/hid/hid-apple.c const struct apple_key_translation *trans, *table; table 195 drivers/hid/hid-apple.c table = macbookair_fn_keys; table 197 drivers/hid/hid-apple.c table = powerbook_fn_keys; table 199 drivers/hid/hid-apple.c table = apple_fn_keys; table 201 drivers/hid/hid-apple.c trans = apple_find_translation (table, usage->code); table 626 drivers/hid/hid-debug.c static const char *table[] = {"INPUT", "OUTPUT", "FEATURE"}; table 634 drivers/hid/hid-debug.c seq_printf(f, "%s", table[i]); table 637 drivers/hid/hid-debug.c seq_printf(f, "[%s]", table[report->type]); table 758 drivers/hid/usbhid/hid-pidff.c static int pidff_find_fields(struct pidff_usage *usage, const u8 *table, table 773 drivers/hid/usbhid/hid-pidff.c (HID_UP_PID | table[k])) { table 144 drivers/hwtracing/coresight/coresight-catu.c cate_t *table; table 155 drivers/hwtracing/coresight/coresight-catu.c table = catu_get_table(catu_table, offset, NULL); table 157 drivers/hwtracing/coresight/coresight-catu.c dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]); table 159 drivers/hwtracing/coresight/coresight-catu.c table[CATU_LINK_PREV], table[CATU_LINK_NEXT]); table 348 drivers/hwtracing/coresight/coresight-tmc-etr.c void tmc_sg_table_sync_data_range(struct tmc_sg_table *table, table 353 drivers/hwtracing/coresight/coresight-tmc-etr.c struct device *real_dev = table->dev->parent; table 354 drivers/hwtracing/coresight/coresight-tmc-etr.c struct tmc_pages *data = &table->data_pages; table 409 drivers/hwtracing/coresight/coresight-tmc-etr.c dma_addr_t addr, bool table) table 415 drivers/hwtracing/coresight/coresight-tmc-etr.c if (table) { table 709 drivers/hwtracing/coresight/coresight-tmc-etr.c struct tmc_sg_table *table = etr_table->sg_table; table 712 drivers/hwtracing/coresight/coresight-tmc-etr.c r_offset = tmc_sg_get_data_page_offset(table, rrp); table 714 drivers/hwtracing/coresight/coresight-tmc-etr.c dev_warn(table->dev, table 720 drivers/hwtracing/coresight/coresight-tmc-etr.c w_offset = tmc_sg_get_data_page_offset(table, rwp); table 722 drivers/hwtracing/coresight/coresight-tmc-etr.c dev_warn(table->dev, table 734 drivers/hwtracing/coresight/coresight-tmc-etr.c tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len); table 316 drivers/hwtracing/coresight/coresight-tmc.h void tmc_sg_table_sync_data_range(struct tmc_sg_table *table, table 1427 drivers/i2c/busses/i2c-i801.c struct_size(lookup, table, mux_config->n_gpios), table 1433 drivers/i2c/busses/i2c-i801.c lookup->table[i].chip_label = mux_config->gpio_chip; table 1434 drivers/i2c/busses/i2c-i801.c lookup->table[i].chip_hwnum = mux_config->gpios[i]; table 1435 drivers/i2c/busses/i2c-i801.c lookup->table[i].con_id = "mux"; table 118 drivers/ide/ide-dma-sff.c __le32 *table = (__le32 *)hwif->dmatable_cpu; table 143 drivers/ide/ide-dma-sff.c *table++ = cpu_to_le32(cur_addr); table 150 drivers/ide/ide-dma-sff.c *table++ = cpu_to_le32(0x8000); table 151 drivers/ide/ide-dma-sff.c *table++ = cpu_to_le32(cur_addr + 0x8000); table 154 drivers/ide/ide-dma-sff.c *table++ = cpu_to_le32(xcount); table 162 drivers/ide/ide-dma-sff.c *--table |= cpu_to_le32(0x80000000); table 202 drivers/ide/ide-iops.c int ide_in_drive_list(u16 *id, const struct drive_list_entry *table) table 204 drivers/ide/ide-iops.c for ( ; table->id_model; table++) table 205 drivers/ide/ide-iops.c if ((!strcmp(table->id_model, (char *)&id[ATA_ID_PROD])) && table 206 drivers/ide/ide-iops.c (!table->id_firmware || table 
207 drivers/ide/ide-iops.c strstr((char *)&id[ATA_ID_FW_REV], table->id_firmware))) table 372 drivers/ide/pmac.c kauai_lookup_timing(struct kauai_timing* table, int cycle_time) table 376 drivers/ide/pmac.c for (i=0; table[i].cycle_time; i++) table 377 drivers/ide/pmac.c if (cycle_time > table[i+1].cycle_time) table 378 drivers/ide/pmac.c return table[i].timing_reg; table 1457 drivers/ide/pmac.c struct dbdma_cmd *table; table 1464 drivers/ide/pmac.c table = (struct dbdma_cmd *) pmif->dma_table_cpu; table 1496 drivers/ide/pmac.c table->command = cpu_to_le16(wr? OUTPUT_MORE: INPUT_MORE); table 1497 drivers/ide/pmac.c table->req_count = cpu_to_le16(tc); table 1498 drivers/ide/pmac.c table->phy_addr = cpu_to_le32(cur_addr); table 1499 drivers/ide/pmac.c table->cmd_dep = 0; table 1500 drivers/ide/pmac.c table->xfer_status = 0; table 1501 drivers/ide/pmac.c table->res_count = 0; table 1504 drivers/ide/pmac.c ++table; table 1512 drivers/ide/pmac.c table[-1].command = cpu_to_le16(wr? OUTPUT_LAST: INPUT_LAST); table 1514 drivers/ide/pmac.c memset(table, 0, sizeof(struct dbdma_cmd)); table 1515 drivers/ide/pmac.c table->command = cpu_to_le16(DBDMA_STOP); table 237 drivers/ide/tx4939ide.c u32 *table = (u32 *)hwif->dmatable_cpu; table 265 drivers/ide/tx4939ide.c *table++ = bcount & 0xffff; table 266 drivers/ide/tx4939ide.c *table++ = cur_addr; table 273 drivers/ide/tx4939ide.c *(table - 2) |= 0x80000000; table 178 drivers/iio/light/stk3310.c static int stk3310_get_index(const int table[][2], int table_size, table 184 drivers/iio/light/stk3310.c if (val == table[i][0] && val2 == table[i][1]) table 49 drivers/infiniband/core/cache.c u16 table[0]; table 166 drivers/infiniband/core/cache.c static bool is_gid_index_default(const struct ib_gid_table *table, table 169 drivers/infiniband/core/cache.c return index < 32 && (BIT(index) & table->default_gid_indices); table 237 drivers/infiniband/core/cache.c struct ib_gid_table *table = rdma_gid_table(device, port_num); table 242 drivers/infiniband/core/cache.c write_lock_irq(&table->rwlock); table 250 drivers/infiniband/core/cache.c if (entry == table->data_vec[entry->attr.index]) table 251 drivers/infiniband/core/cache.c table->data_vec[entry->attr.index] = NULL; table 253 drivers/infiniband/core/cache.c write_unlock_irq(&table->rwlock); table 282 drivers/infiniband/core/cache.c struct ib_gid_table *table = rdma_gid_table(device, port_num); table 284 drivers/infiniband/core/cache.c mutex_lock(&table->lock); table 286 drivers/infiniband/core/cache.c mutex_unlock(&table->lock); table 317 drivers/infiniband/core/cache.c static void store_gid_entry(struct ib_gid_table *table, table 326 drivers/infiniband/core/cache.c lockdep_assert_held(&table->lock); table 327 drivers/infiniband/core/cache.c write_lock_irq(&table->rwlock); table 328 drivers/infiniband/core/cache.c table->data_vec[entry->attr.index] = entry; table 329 drivers/infiniband/core/cache.c write_unlock_irq(&table->rwlock); table 379 drivers/infiniband/core/cache.c struct ib_gid_table *table, int ix) table 384 drivers/infiniband/core/cache.c lockdep_assert_held(&table->lock); table 387 drivers/infiniband/core/cache.c ix, table->data_vec[ix]->attr.gid.raw); table 389 drivers/infiniband/core/cache.c write_lock_irq(&table->rwlock); table 390 drivers/infiniband/core/cache.c entry = table->data_vec[ix]; table 396 drivers/infiniband/core/cache.c table->data_vec[ix] = NULL; table 397 drivers/infiniband/core/cache.c write_unlock_irq(&table->rwlock); table 422 drivers/infiniband/core/cache.c static int 
add_modify_gid(struct ib_gid_table *table, table 432 drivers/infiniband/core/cache.c if (is_gid_entry_valid(table->data_vec[attr->index])) table 433 drivers/infiniband/core/cache.c del_gid(attr->device, attr->port_num, table, attr->index); table 453 drivers/infiniband/core/cache.c store_gid_entry(table, entry); table 462 drivers/infiniband/core/cache.c static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, table 470 drivers/infiniband/core/cache.c while (i < table->sz && (found < 0 || empty < 0)) { table 471 drivers/infiniband/core/cache.c struct ib_gid_table_entry *data = table->data_vec[i]; table 485 drivers/infiniband/core/cache.c is_gid_index_default(table, curr_index)) { table 524 drivers/infiniband/core/cache.c is_gid_index_default(table, curr_index) != default_gid) table 546 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 558 drivers/infiniband/core/cache.c table = rdma_gid_table(ib_dev, port); table 560 drivers/infiniband/core/cache.c mutex_lock(&table->lock); table 562 drivers/infiniband/core/cache.c ix = find_gid(table, gid, attr, default_gid, mask, &empty); table 574 drivers/infiniband/core/cache.c ret = add_modify_gid(table, attr); table 579 drivers/infiniband/core/cache.c mutex_unlock(&table->lock); table 601 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 605 drivers/infiniband/core/cache.c table = rdma_gid_table(ib_dev, port); table 607 drivers/infiniband/core/cache.c mutex_lock(&table->lock); table 609 drivers/infiniband/core/cache.c ix = find_gid(table, gid, attr, default_gid, mask, NULL); table 615 drivers/infiniband/core/cache.c del_gid(ib_dev, port, table, ix); table 619 drivers/infiniband/core/cache.c mutex_unlock(&table->lock); table 640 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 644 drivers/infiniband/core/cache.c table = rdma_gid_table(ib_dev, port); table 646 drivers/infiniband/core/cache.c mutex_lock(&table->lock); table 648 drivers/infiniband/core/cache.c for (ix = 0; ix < table->sz; ix++) { table 649 drivers/infiniband/core/cache.c if (is_gid_entry_valid(table->data_vec[ix]) && table 650 drivers/infiniband/core/cache.c table->data_vec[ix]->attr.ndev == ndev) { table 651 drivers/infiniband/core/cache.c del_gid(ib_dev, port, table, ix); table 656 drivers/infiniband/core/cache.c mutex_unlock(&table->lock); table 686 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 696 drivers/infiniband/core/cache.c table = rdma_gid_table(ib_dev, port); table 701 drivers/infiniband/core/cache.c read_lock_irqsave(&table->rwlock, flags); table 702 drivers/infiniband/core/cache.c local_index = find_gid(table, gid, &val, false, mask, NULL); table 704 drivers/infiniband/core/cache.c get_gid_entry(table->data_vec[local_index]); table 705 drivers/infiniband/core/cache.c attr = &table->data_vec[local_index]->attr; table 706 drivers/infiniband/core/cache.c read_unlock_irqrestore(&table->rwlock, flags); table 710 drivers/infiniband/core/cache.c read_unlock_irqrestore(&table->rwlock, flags); table 739 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 746 drivers/infiniband/core/cache.c table = rdma_gid_table(ib_dev, port); table 748 drivers/infiniband/core/cache.c read_lock_irqsave(&table->rwlock, flags); table 749 drivers/infiniband/core/cache.c for (i = 0; i < table->sz; i++) { table 750 drivers/infiniband/core/cache.c struct ib_gid_table_entry *entry = table->data_vec[i]; table 764 drivers/infiniband/core/cache.c read_unlock_irqrestore(&table->rwlock, flags); table 770 
drivers/infiniband/core/cache.c struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL); table 772 drivers/infiniband/core/cache.c if (!table) table 775 drivers/infiniband/core/cache.c table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL); table 776 drivers/infiniband/core/cache.c if (!table->data_vec) table 779 drivers/infiniband/core/cache.c mutex_init(&table->lock); table 781 drivers/infiniband/core/cache.c table->sz = sz; table 782 drivers/infiniband/core/cache.c rwlock_init(&table->rwlock); table 783 drivers/infiniband/core/cache.c return table; table 786 drivers/infiniband/core/cache.c kfree(table); table 791 drivers/infiniband/core/cache.c struct ib_gid_table *table) table 796 drivers/infiniband/core/cache.c if (!table) table 799 drivers/infiniband/core/cache.c for (i = 0; i < table->sz; i++) { table 800 drivers/infiniband/core/cache.c if (is_gid_entry_free(table->data_vec[i])) table 802 drivers/infiniband/core/cache.c if (kref_read(&table->data_vec[i]->kref) > 1) { table 805 drivers/infiniband/core/cache.c kref_read(&table->data_vec[i]->kref)); table 812 drivers/infiniband/core/cache.c mutex_destroy(&table->lock); table 813 drivers/infiniband/core/cache.c kfree(table->data_vec); table 814 drivers/infiniband/core/cache.c kfree(table); table 818 drivers/infiniband/core/cache.c struct ib_gid_table *table) table 823 drivers/infiniband/core/cache.c if (!table) table 826 drivers/infiniband/core/cache.c mutex_lock(&table->lock); table 827 drivers/infiniband/core/cache.c for (i = 0; i < table->sz; ++i) { table 828 drivers/infiniband/core/cache.c if (is_gid_entry_valid(table->data_vec[i])) { table 829 drivers/infiniband/core/cache.c del_gid(ib_dev, port, table, i); table 833 drivers/infiniband/core/cache.c mutex_unlock(&table->lock); table 873 drivers/infiniband/core/cache.c struct ib_gid_table *table) table 882 drivers/infiniband/core/cache.c for (i = 0; i < num_default_gids && i < table->sz; i++) table 883 drivers/infiniband/core/cache.c table->default_gid_indices |= BIT(i); table 899 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 903 drivers/infiniband/core/cache.c table = alloc_gid_table( table 905 drivers/infiniband/core/cache.c if (!table) table 908 drivers/infiniband/core/cache.c gid_table_reserve_default(ib_dev, rdma_port, table); table 909 drivers/infiniband/core/cache.c ib_dev->port_data[rdma_port].cache.gid = table; table 958 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 965 drivers/infiniband/core/cache.c table = rdma_gid_table(device, port_num); table 966 drivers/infiniband/core/cache.c read_lock_irqsave(&table->rwlock, flags); table 968 drivers/infiniband/core/cache.c if (index < 0 || index >= table->sz || table 969 drivers/infiniband/core/cache.c !is_gid_entry_valid(table->data_vec[index])) table 972 drivers/infiniband/core/cache.c memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid)); table 976 drivers/infiniband/core/cache.c read_unlock_irqrestore(&table->rwlock, flags); table 1008 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 1012 drivers/infiniband/core/cache.c table = device->port_data[p].cache.gid; table 1013 drivers/infiniband/core/cache.c read_lock_irqsave(&table->rwlock, flags); table 1014 drivers/infiniband/core/cache.c index = find_gid(table, gid, &gid_attr_val, false, mask, NULL); table 1018 drivers/infiniband/core/cache.c get_gid_entry(table->data_vec[index]); table 1019 drivers/infiniband/core/cache.c attr = &table->data_vec[index]->attr; table 1020 
drivers/infiniband/core/cache.c read_unlock_irqrestore(&table->rwlock, flags); table 1023 drivers/infiniband/core/cache.c read_unlock_irqrestore(&table->rwlock, flags); table 1049 drivers/infiniband/core/cache.c *pkey = cache->table[index]; table 1095 drivers/infiniband/core/cache.c if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) { table 1096 drivers/infiniband/core/cache.c if (cache->table[i] & 0x8000) { table 1135 drivers/infiniband/core/cache.c if (cache->table[i] == pkey) { table 1203 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 1209 drivers/infiniband/core/cache.c table = rdma_gid_table(device, port_num); table 1210 drivers/infiniband/core/cache.c if (index < 0 || index >= table->sz) table 1213 drivers/infiniband/core/cache.c read_lock_irqsave(&table->rwlock, flags); table 1214 drivers/infiniband/core/cache.c if (!is_gid_entry_valid(table->data_vec[index])) table 1217 drivers/infiniband/core/cache.c get_gid_entry(table->data_vec[index]); table 1218 drivers/infiniband/core/cache.c attr = &table->data_vec[index]->attr; table 1220 drivers/infiniband/core/cache.c read_unlock_irqrestore(&table->rwlock, flags); table 1283 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 1287 drivers/infiniband/core/cache.c table = rdma_gid_table(device, port_num); table 1289 drivers/infiniband/core/cache.c read_lock_irqsave(&table->rwlock, flags); table 1290 drivers/infiniband/core/cache.c valid = is_gid_entry_valid(table->data_vec[attr->index]); table 1297 drivers/infiniband/core/cache.c read_unlock_irqrestore(&table->rwlock, flags); table 1362 drivers/infiniband/core/cache.c struct ib_gid_table *table; table 1368 drivers/infiniband/core/cache.c table = rdma_gid_table(device, port); table 1370 drivers/infiniband/core/cache.c mutex_lock(&table->lock); table 1382 drivers/infiniband/core/cache.c add_modify_gid(table, &gid_attr); table 1385 drivers/infiniband/core/cache.c mutex_unlock(&table->lock); table 1417 drivers/infiniband/core/cache.c pkey_cache = kmalloc(struct_size(pkey_cache, table, table 1428 drivers/infiniband/core/cache.c ret = ib_query_pkey(device, port, i, pkey_cache->table + i); table 63 drivers/infiniband/core/multicast.c struct rb_root table; table 132 drivers/infiniband/core/multicast.c struct rb_node *node = port->table.rb_node; table 154 drivers/infiniband/core/multicast.c struct rb_node **link = &port->table.rb_node; table 175 drivers/infiniband/core/multicast.c rb_insert_color(&group->node, &port->table); table 192 drivers/infiniband/core/multicast.c rb_erase(&group->node, &port->table); table 535 drivers/infiniband/core/multicast.c rb_erase(&group->node, &group->port->table); table 778 drivers/infiniband/core/multicast.c for (node = rb_first(&port->table); node; node = rb_next(node)) { table 840 drivers/infiniband/core/multicast.c port->table = RB_ROOT; table 51 drivers/infiniband/hw/cxgb4/id_table.c obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); table 53 drivers/infiniband/hw/cxgb4/id_table.c obj = find_first_zero_bit(alloc->table, alloc->max); table 62 drivers/infiniband/hw/cxgb4/id_table.c set_bit(obj, alloc->table); table 78 drivers/infiniband/hw/cxgb4/id_table.c clear_bit(obj, alloc->table); table 95 drivers/infiniband/hw/cxgb4/id_table.c alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), table 97 drivers/infiniband/hw/cxgb4/id_table.c if (!alloc->table) table 100 drivers/infiniband/hw/cxgb4/id_table.c bitmap_zero(alloc->table, num); table 103 drivers/infiniband/hw/cxgb4/id_table.c set_bit(i, alloc->table); 
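The id_table.c lines above allocate small integer IDs out of a bitmap, resuming the scan from the last successful allocation and wrapping around. The following userspace sketch shows the same round-robin idea with a plain byte array instead of the kernel's find_next_zero_bit()/set_bit() bitmap helpers, and without locking; all names here are illustrative:

	#include <stdlib.h>
	#include <stdio.h>

	/* Hypothetical, simplified ID allocator. */
	struct id_table {
		unsigned int max;	/* number of IDs managed */
		unsigned int last;	/* where the next search starts */
		unsigned char *used;	/* one byte per ID, for simplicity */
	};

	static int id_table_init(struct id_table *t, unsigned int num)
	{
		t->used = calloc(num, 1);
		if (!t->used)
			return -1;
		t->max = num;
		t->last = 0;
		return 0;
	}

	/* Find a free ID starting at 'last', wrapping once; -1 if the table is full. */
	static int id_table_alloc(struct id_table *t)
	{
		unsigned int i, idx;

		for (i = 0; i < t->max; i++) {
			idx = (t->last + i) % t->max;
			if (!t->used[idx]) {
				t->used[idx] = 1;
				t->last = (idx + 1) % t->max;
				return (int)idx;
			}
		}
		return -1;
	}

	static void id_table_free(struct id_table *t, unsigned int id)
	{
		if (id < t->max)
			t->used[id] = 0;
	}

	int main(void)
	{
		struct id_table t;

		if (id_table_init(&t, 4))
			return 1;
		printf("%d %d\n", id_table_alloc(&t), id_table_alloc(&t));	/* 0 1 */
		id_table_free(&t, 0);
		printf("%d\n", id_table_alloc(&t));	/* 2: search resumes after 'last' */
		free(t.used);
		return 0;
	}

Resuming from the last allocation rather than always scanning from zero spreads IDs across the range and avoids immediately reusing a just-freed ID.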
table 110 drivers/infiniband/hw/cxgb4/id_table.c kfree(alloc->table); table 93 drivers/infiniband/hw/cxgb4/iw_cxgb4.h unsigned long *table; table 5300 drivers/infiniband/hw/hfi1/chip.c struct flag_table *table, int table_size) table 5314 drivers/infiniband/hw/hfi1/chip.c if (flags & table[i].flag) { table 5315 drivers/infiniband/hw/hfi1/chip.c no_room = append_str(buf, &p, &len, table[i].str); table 5318 drivers/infiniband/hw/hfi1/chip.c flags &= ~table[i].flag; table 11059 drivers/infiniband/hw/hfi1/chip.c memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl)); table 11065 drivers/infiniband/hw/hfi1/chip.c memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); table 11071 drivers/infiniband/hw/hfi1/chip.c return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); table 297 drivers/infiniband/hw/hfi1/eprom.c struct hfi1_eprom_table_entry *table; table 327 drivers/infiniband/hw/hfi1/eprom.c table = (struct hfi1_eprom_table_entry *) table 338 drivers/infiniband/hw/hfi1/eprom.c table = table_buffer; table 343 drivers/infiniband/hw/hfi1/eprom.c if (table[i].type == HFI1_EFT_PLATFORM_CONFIG) { table 344 drivers/infiniband/hw/hfi1/eprom.c entry = &table[i]; table 1888 drivers/infiniband/hw/hfi1/firmware.c pcfgcache->config_tables[table_type].table = ptr; table 2018 drivers/infiniband/hw/hfi1/firmware.c static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table, table 2028 drivers/infiniband/hw/hfi1/firmware.c switch (table) { table 2040 drivers/infiniband/hw/hfi1/firmware.c if (field && field < platform_config_table_limits[table]) table 2042 drivers/infiniband/hw/hfi1/firmware.c pcfgcache->config_tables[table].table_metadata + field; table 2115 drivers/infiniband/hw/hfi1/firmware.c src_ptr = pcfgcache->config_tables[table_type].table; table 2137 drivers/infiniband/hw/hfi1/firmware.c pcfgcache->config_tables[table_type].table + 4 : table 2138 drivers/infiniband/hw/hfi1/firmware.c pcfgcache->config_tables[table_type].table; table 2147 drivers/infiniband/hw/hfi1/firmware.c src_ptr = pcfgcache->config_tables[table_type].table; table 760 drivers/infiniband/hw/hfi1/hfi.h struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE]; table 156 drivers/infiniband/hw/hfi1/platform.h u32 *table; table 44 drivers/infiniband/hw/hns/hns_roce_alloc.c *obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); table 48 drivers/infiniband/hw/hns/hns_roce_alloc.c *obj = find_first_zero_bit(bitmap->table, bitmap->max); table 52 drivers/infiniband/hw/hns/hns_roce_alloc.c set_bit(*obj, bitmap->table); table 83 drivers/infiniband/hw/hns/hns_roce_alloc.c *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, table 88 drivers/infiniband/hw/hns/hns_roce_alloc.c *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0, table 94 drivers/infiniband/hw/hns/hns_roce_alloc.c set_bit(*obj + i, bitmap->table); table 121 drivers/infiniband/hw/hns/hns_roce_alloc.c clear_bit(obj + i, bitmap->table); table 144 drivers/infiniband/hw/hns/hns_roce_alloc.c bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long), table 146 drivers/infiniband/hw/hns/hns_roce_alloc.c if (!bitmap->table) table 150 drivers/infiniband/hw/hns/hns_roce_alloc.c set_bit(i, bitmap->table); table 157 drivers/infiniband/hw/hns/hns_roce_alloc.c kfree(bitmap->table); table 124 drivers/infiniband/hw/hns/hns_roce_cq.c ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn); table 166 drivers/infiniband/hw/hns/hns_roce_cq.c hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); table 203 
drivers/infiniband/hw/hns/hns_roce_cq.c hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); table 302 drivers/infiniband/hw/hns/hns_roce_device.h unsigned long *table; table 562 drivers/infiniband/hw/hns/hns_roce_device.h struct hns_roce_hem_table table; table 568 drivers/infiniband/hw/hns/hns_roce_device.h struct hns_roce_hem_table table; table 957 drivers/infiniband/hw/hns/hns_roce_device.h struct hns_roce_hem_table *table, int obj, int step_idx); table 959 drivers/infiniband/hw/hns/hns_roce_device.h struct hns_roce_hem_table *table, int obj, table 228 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, unsigned long *obj, table 237 drivers/infiniband/hw/hns/hns_roce_hem.c if (get_hem_table_config(hr_dev, mhop, table->type)) table 247 drivers/infiniband/hw/hns/hns_roce_hem.c bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num); table 249 drivers/infiniband/hw/hns/hns_roce_hem.c chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : table 251 drivers/infiniband/hw/hns/hns_roce_hem.c table_idx = (*obj & (table->num_obj - 1)) / table 252 drivers/infiniband/hw/hns/hns_roce_hem.c (chunk_size / table->obj_size); table 268 drivers/infiniband/hw/hns/hns_roce_hem.c table->type, mhop->hop_num); table 363 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, unsigned long obj) table 378 drivers/infiniband/hw/hns/hns_roce_hem.c unsigned long i = (obj & (table->num_obj - 1)) / table 379 drivers/infiniband/hw/hns/hns_roce_hem.c (table->table_chunk_size / table->obj_size); table 381 drivers/infiniband/hw/hns/hns_roce_hem.c switch (table->type) { table 387 drivers/infiniband/hw/hns/hns_roce_hem.c ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); table 399 drivers/infiniband/hw/hns/hns_roce_hem.c for (hns_roce_hem_first(table->hem[i], &iter); table 438 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, table 460 drivers/infiniband/hw/hns/hns_roce_hem.c ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); table 469 drivers/infiniband/hw/hns/hns_roce_hem.c bt_num = hns_roce_get_bt_num(table->type, hop_num); table 486 drivers/infiniband/hw/hns/hns_roce_hem.c table->type, hop_num); table 490 drivers/infiniband/hw/hns/hns_roce_hem.c if (unlikely(hem_idx >= table->num_hem)) { table 492 drivers/infiniband/hw/hns/hns_roce_hem.c table->type, hem_idx, table->num_hem); table 496 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_lock(&table->mutex); table 498 drivers/infiniband/hw/hns/hns_roce_hem.c if (table->hem[hem_idx]) { table 499 drivers/infiniband/hw/hns/hns_roce_hem.c ++table->hem[hem_idx]->refcount; table 504 drivers/infiniband/hw/hns/hns_roce_hem.c if ((check_whether_bt_num_3(table->type, hop_num) || table 505 drivers/infiniband/hw/hns/hns_roce_hem.c check_whether_bt_num_2(table->type, hop_num)) && table 506 drivers/infiniband/hw/hns/hns_roce_hem.c !table->bt_l0[bt_l0_idx]) { table 507 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size, table 508 drivers/infiniband/hw/hns/hns_roce_hem.c &(table->bt_l0_dma_addr[bt_l0_idx]), table 510 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->bt_l0[bt_l0_idx]) { table 517 drivers/infiniband/hw/hns/hns_roce_hem.c if (table->type < HEM_TYPE_MTT) { table 519 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) { table 528 drivers/infiniband/hw/hns/hns_roce_hem.c if (check_whether_bt_num_3(table->type, hop_num) && table 529 drivers/infiniband/hw/hns/hns_roce_hem.c 
!table->bt_l1[bt_l1_idx]) { table 530 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size, table 531 drivers/infiniband/hw/hns/hns_roce_hem.c &(table->bt_l1_dma_addr[bt_l1_idx]), table 533 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->bt_l1[bt_l1_idx]) { table 538 drivers/infiniband/hw/hns/hns_roce_hem.c *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = table 539 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1_dma_addr[bt_l1_idx]; table 543 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) { table 554 drivers/infiniband/hw/hns/hns_roce_hem.c size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size; table 555 drivers/infiniband/hw/hns/hns_roce_hem.c table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev, table 558 drivers/infiniband/hw/hns/hns_roce_hem.c (table->lowmem ? GFP_KERNEL : table 560 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->hem[hem_idx]) { table 565 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_hem_first(table->hem[hem_idx], &iter); table 568 drivers/infiniband/hw/hns/hns_roce_hem.c if (table->type < HEM_TYPE_MTT) { table 570 drivers/infiniband/hw/hns/hns_roce_hem.c *(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba; table 573 drivers/infiniband/hw/hns/hns_roce_hem.c *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba; table 583 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) { table 589 drivers/infiniband/hw/hns/hns_roce_hem.c *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba; table 592 drivers/infiniband/hw/hns/hns_roce_hem.c ++table->hem[hem_idx]->refcount; table 597 drivers/infiniband/hw/hns/hns_roce_hem.c dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx], table 598 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1_dma_addr[bt_l1_idx]); table 599 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1[bt_l1_idx] = NULL; table 604 drivers/infiniband/hw/hns/hns_roce_hem.c dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx], table 605 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0_dma_addr[bt_l0_idx]); table 606 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0[bt_l0_idx] = NULL; table 610 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_unlock(&table->mutex); table 615 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, unsigned long obj) table 621 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_check_whether_mhop(hr_dev, table->type)) table 622 drivers/infiniband/hw/hns/hns_roce_hem.c return hns_roce_table_mhop_get(hr_dev, table, obj); table 624 drivers/infiniband/hw/hns/hns_roce_hem.c i = (obj & (table->num_obj - 1)) / (table->table_chunk_size / table 625 drivers/infiniband/hw/hns/hns_roce_hem.c table->obj_size); table 627 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_lock(&table->mutex); table 629 drivers/infiniband/hw/hns/hns_roce_hem.c if (table->hem[i]) { table 630 drivers/infiniband/hw/hns/hns_roce_hem.c ++table->hem[i]->refcount; table 634 drivers/infiniband/hw/hns/hns_roce_hem.c table->hem[i] = hns_roce_alloc_hem(hr_dev, table 635 drivers/infiniband/hw/hns/hns_roce_hem.c table->table_chunk_size >> PAGE_SHIFT, table 636 drivers/infiniband/hw/hns/hns_roce_hem.c table->table_chunk_size, table 637 drivers/infiniband/hw/hns/hns_roce_hem.c (table->lowmem ? 
GFP_KERNEL : table 639 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->hem[i]) { table 645 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_set_hem(hr_dev, table, obj)) { table 646 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_free_hem(hr_dev, table->hem[i]); table 647 drivers/infiniband/hw/hns/hns_roce_hem.c table->hem[i] = NULL; table 653 drivers/infiniband/hw/hns/hns_roce_hem.c ++table->hem[i]->refcount; table 655 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_unlock(&table->mutex); table 660 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, table 676 drivers/infiniband/hw/hns/hns_roce_hem.c ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); table 684 drivers/infiniband/hw/hns/hns_roce_hem.c bt_num = hns_roce_get_bt_num(table->type, hop_num); table 699 drivers/infiniband/hw/hns/hns_roce_hem.c table->type, hop_num); table 703 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_lock(&table->mutex); table 705 drivers/infiniband/hw/hns/hns_roce_hem.c if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) { table 706 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_unlock(&table->mutex); table 710 drivers/infiniband/hw/hns/hns_roce_hem.c if (table->type < HEM_TYPE_MTT && hop_num == 1) { table 711 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) table 713 drivers/infiniband/hw/hns/hns_roce_hem.c } else if (table->type < HEM_TYPE_MTT && hop_num == 2) { table 714 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2)) table 716 drivers/infiniband/hw/hns/hns_roce_hem.c } else if (table->type < HEM_TYPE_MTT && table 718 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) table 726 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_free_hem(hr_dev, table->hem[hem_idx]); table 727 drivers/infiniband/hw/hns/hns_roce_hem.c table->hem[hem_idx] = NULL; table 729 drivers/infiniband/hw/hns/hns_roce_hem.c if (check_whether_bt_num_2(table->type, hop_num)) { table 731 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_check_hem_null(table->hem, start_idx, table 732 drivers/infiniband/hw/hns/hns_roce_hem.c chunk_ba_num, table->num_hem)) { table 733 drivers/infiniband/hw/hns/hns_roce_hem.c if (table->type < HEM_TYPE_MTT && table 734 drivers/infiniband/hw/hns/hns_roce_hem.c hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) table 738 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0[mhop.l0_idx], table 739 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0_dma_addr[mhop.l0_idx]); table 740 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0[mhop.l0_idx] = NULL; table 742 drivers/infiniband/hw/hns/hns_roce_hem.c } else if (check_whether_bt_num_3(table->type, hop_num)) { table 745 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_check_hem_null(table->hem, start_idx, table 746 drivers/infiniband/hw/hns/hns_roce_hem.c chunk_ba_num, table->num_hem)) { table 747 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) table 751 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1[bt_l1_idx], table 752 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1_dma_addr[bt_l1_idx]); table 753 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1[bt_l1_idx] = NULL; table 756 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_check_bt_null(table->bt_l1, start_idx, table 758 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->clear_hem(hr_dev, table, obj, table 763 drivers/infiniband/hw/hns/hns_roce_hem.c 
table->bt_l0[mhop.l0_idx], table 764 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0_dma_addr[mhop.l0_idx]); table 765 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0[mhop.l0_idx] = NULL; table 770 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_unlock(&table->mutex); table 774 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, unsigned long obj) table 779 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_check_whether_mhop(hr_dev, table->type)) { table 780 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_table_mhop_put(hr_dev, table, obj, 1); table 784 drivers/infiniband/hw/hns/hns_roce_hem.c i = (obj & (table->num_obj - 1)) / table 785 drivers/infiniband/hw/hns/hns_roce_hem.c (table->table_chunk_size / table->obj_size); table 787 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_lock(&table->mutex); table 789 drivers/infiniband/hw/hns/hns_roce_hem.c if (--table->hem[i]->refcount == 0) { table 791 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) table 794 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_free_hem(hr_dev, table->hem[i]); table 795 drivers/infiniband/hw/hns/hns_roce_hem.c table->hem[i] = NULL; table 798 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_unlock(&table->mutex); table 802 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, table 817 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->lowmem) table 820 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_lock(&table->mutex); table 822 drivers/infiniband/hw/hns/hns_roce_hem.c if (!hns_roce_check_whether_mhop(hr_dev, table->type)) { table 823 drivers/infiniband/hw/hns/hns_roce_hem.c obj_per_chunk = table->table_chunk_size / table->obj_size; table 824 drivers/infiniband/hw/hns/hns_roce_hem.c hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk]; table 825 drivers/infiniband/hw/hns/hns_roce_hem.c idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk; table 826 drivers/infiniband/hw/hns/hns_roce_hem.c dma_offset = offset = idx_offset * table->obj_size; table 830 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop)) table 841 drivers/infiniband/hw/hns/hns_roce_hem.c hem = table->hem[hem_idx]; table 842 drivers/infiniband/hw/hns/hns_roce_hem.c dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size % table 870 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_unlock(&table->mutex); table 875 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, table 879 drivers/infiniband/hw/hns/hns_roce_hem.c unsigned long inc = table->table_chunk_size / table->obj_size; table 883 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_check_whether_mhop(hr_dev, table->type)) { table 884 drivers/infiniband/hw/hns/hns_roce_hem.c ret = hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); table 887 drivers/infiniband/hw/hns/hns_roce_hem.c inc = mhop.bt_chunk_size / table->obj_size; table 892 drivers/infiniband/hw/hns/hns_roce_hem.c ret = hns_roce_table_get(hr_dev, table, i); table 902 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_table_put(hr_dev, table, i); table 908 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, table 912 drivers/infiniband/hw/hns/hns_roce_hem.c unsigned long inc = table->table_chunk_size / table->obj_size; table 915 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_check_whether_mhop(hr_dev, table->type)) { table 916 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, 
&mhop)) table 918 drivers/infiniband/hw/hns/hns_roce_hem.c inc = mhop.bt_chunk_size / table->obj_size; table 922 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_table_put(hr_dev, table, i); table 926 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table, u32 type, table 934 drivers/infiniband/hw/hns/hns_roce_hem.c table->table_chunk_size = hr_dev->caps.chunk_sz; table 935 drivers/infiniband/hw/hns/hns_roce_hem.c obj_per_chunk = table->table_chunk_size / obj_size; table 938 drivers/infiniband/hw/hns/hns_roce_hem.c table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL); table 939 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->hem) table 963 drivers/infiniband/hw/hns/hns_roce_hem.c table->hem = kcalloc(num_hem, sizeof(*table->hem), table 965 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->hem) table 973 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1 = kcalloc(num_bt_l1, table 974 drivers/infiniband/hw/hns/hns_roce_hem.c sizeof(*table->bt_l1), table 976 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->bt_l1) table 979 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1_dma_addr = kcalloc(num_bt_l1, table 980 drivers/infiniband/hw/hns/hns_roce_hem.c sizeof(*table->bt_l1_dma_addr), table 983 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->bt_l1_dma_addr) table 989 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0), table 991 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->bt_l0) table 994 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0_dma_addr = kcalloc(num_bt_l0, table 995 drivers/infiniband/hw/hns/hns_roce_hem.c sizeof(*table->bt_l0_dma_addr), table 997 drivers/infiniband/hw/hns/hns_roce_hem.c if (!table->bt_l0_dma_addr) table 1002 drivers/infiniband/hw/hns/hns_roce_hem.c table->type = type; table 1003 drivers/infiniband/hw/hns/hns_roce_hem.c table->num_hem = num_hem; table 1004 drivers/infiniband/hw/hns/hns_roce_hem.c table->num_obj = nobj; table 1005 drivers/infiniband/hw/hns/hns_roce_hem.c table->obj_size = obj_size; table 1006 drivers/infiniband/hw/hns/hns_roce_hem.c table->lowmem = use_lowmem; table 1007 drivers/infiniband/hw/hns/hns_roce_hem.c mutex_init(&table->mutex); table 1012 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->bt_l0); table 1013 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0 = NULL; table 1016 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->bt_l1_dma_addr); table 1017 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1_dma_addr = NULL; table 1020 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->bt_l1); table 1021 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1 = NULL; table 1024 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->hem); table 1025 drivers/infiniband/hw/hns/hns_roce_hem.c table->hem = NULL; table 1032 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table) table 1039 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop)) table 1041 drivers/infiniband/hw/hns/hns_roce_hem.c buf_chunk_size = table->type < HEM_TYPE_MTT ? 
mhop.buf_chunk_size : table 1044 drivers/infiniband/hw/hns/hns_roce_hem.c for (i = 0; i < table->num_hem; ++i) { table 1045 drivers/infiniband/hw/hns/hns_roce_hem.c obj = i * buf_chunk_size / table->obj_size; table 1046 drivers/infiniband/hw/hns/hns_roce_hem.c if (table->hem[i]) table 1047 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_table_mhop_put(hr_dev, table, obj, 0); table 1050 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->hem); table 1051 drivers/infiniband/hw/hns/hns_roce_hem.c table->hem = NULL; table 1052 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->bt_l1); table 1053 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1 = NULL; table 1054 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->bt_l1_dma_addr); table 1055 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l1_dma_addr = NULL; table 1056 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->bt_l0); table 1057 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0 = NULL; table 1058 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->bt_l0_dma_addr); table 1059 drivers/infiniband/hw/hns/hns_roce_hem.c table->bt_l0_dma_addr = NULL; table 1063 drivers/infiniband/hw/hns/hns_roce_hem.c struct hns_roce_hem_table *table) table 1068 drivers/infiniband/hw/hns/hns_roce_hem.c if (hns_roce_check_whether_mhop(hr_dev, table->type)) { table 1069 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_cleanup_mhop_hem_table(hr_dev, table); table 1073 drivers/infiniband/hw/hns/hns_roce_hem.c for (i = 0; i < table->num_hem; ++i) table 1074 drivers/infiniband/hw/hns/hns_roce_hem.c if (table->hem[i]) { table 1075 drivers/infiniband/hw/hns/hns_roce_hem.c if (hr_dev->hw->clear_hem(hr_dev, table, table 1076 drivers/infiniband/hw/hns/hns_roce_hem.c i * table->table_chunk_size / table->obj_size, 0)) table 1079 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_free_hem(hr_dev, table->hem[i]); table 1082 drivers/infiniband/hw/hns/hns_roce_hem.c kfree(table->hem); table 1095 drivers/infiniband/hw/hns/hns_roce_hem.c &hr_dev->srq_table.table); table 1096 drivers/infiniband/hw/hns/hns_roce_hem.c hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); table 112 drivers/infiniband/hw/hns/hns_roce_hem.h struct hns_roce_hem_table *table, unsigned long obj); table 114 drivers/infiniband/hw/hns/hns_roce_hem.h struct hns_roce_hem_table *table, unsigned long obj); table 116 drivers/infiniband/hw/hns/hns_roce_hem.h struct hns_roce_hem_table *table, unsigned long obj, table 119 drivers/infiniband/hw/hns/hns_roce_hem.h struct hns_roce_hem_table *table, table 122 drivers/infiniband/hw/hns/hns_roce_hem.h struct hns_roce_hem_table *table, table 125 drivers/infiniband/hw/hns/hns_roce_hem.h struct hns_roce_hem_table *table, u32 type, table 129 drivers/infiniband/hw/hns/hns_roce_hem.h struct hns_roce_hem_table *table); table 132 drivers/infiniband/hw/hns/hns_roce_hem.h struct hns_roce_hem_table *table, unsigned long *obj, table 2421 drivers/infiniband/hw/hns/hns_roce_hw_v1.c struct hns_roce_hem_table *table, int obj, table 2434 drivers/infiniband/hw/hns/hns_roce_hw_v1.c switch (table->type) { table 2451 drivers/infiniband/hw/hns/hns_roce_hw_v1.c ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); table 1806 drivers/infiniband/hw/hns/hns_roce_hw_v2.c entry = link_tbl->table.buf; table 1820 drivers/infiniband/hw/hns/hns_roce_hw_v2.c cpu_to_le32(link_tbl->table.map & 0xffffffff); table 1822 drivers/infiniband/hw/hns/hns_roce_hw_v2.c cpu_to_le32(link_tbl->table.map >> 32); table 1895 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->table.buf = 
dma_alloc_coherent(dev, size, table 1896 drivers/infiniband/hw/hns/hns_roce_hw_v2.c &link_tbl->table.map, table 1898 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (!link_tbl->table.buf) table 1906 drivers/infiniband/hw/hns/hns_roce_hw_v2.c entry = link_tbl->table.buf; table 1936 drivers/infiniband/hw/hns/hns_roce_hw_v2.c dma_free_coherent(dev, size, link_tbl->table.buf, table 1937 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->table.map); table 1959 drivers/infiniband/hw/hns/hns_roce_hw_v2.c dma_free_coherent(dev, size, link_tbl->table.buf, table 1960 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->table.map); table 3037 drivers/infiniband/hw/hns/hns_roce_hw_v2.c struct hns_roce_hem_table *table, int obj, table 3054 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (!hns_roce_check_whether_mhop(hr_dev, table->type)) table 3057 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); table 3074 drivers/infiniband/hw/hns/hns_roce_hw_v2.c op = get_op_for_set_hem(hr_dev, table->type, step_idx); table 3082 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (table->type == HEM_TYPE_SCCC) table 3086 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hem = table->hem[hem_idx]; table 3098 drivers/infiniband/hw/hns/hns_roce_hw_v2.c bt_ba = table->bt_l0_dma_addr[i]; table 3100 drivers/infiniband/hw/hns/hns_roce_hw_v2.c bt_ba = table->bt_l1_dma_addr[l1_idx]; table 3112 drivers/infiniband/hw/hns/hns_roce_hw_v2.c struct hns_roce_hem_table *table, int obj, table 3120 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (!hns_roce_check_whether_mhop(hr_dev, table->type)) table 3123 drivers/infiniband/hw/hns/hns_roce_hw_v2.c switch (table->type) { table 3142 drivers/infiniband/hw/hns/hns_roce_hw_v2.c table->type); table 3146 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (table->type == HEM_TYPE_SCCC || table 3147 drivers/infiniband/hw/hns/hns_roce_hw_v2.c table->type == HEM_TYPE_QPC_TIMER || table 3148 drivers/infiniband/hw/hns/hns_roce_hw_v2.c table->type == HEM_TYPE_CQC_TIMER) table 1612 drivers/infiniband/hw/hns/hns_roce_hw_v2.h struct hns_roce_buf_list table; table 646 drivers/infiniband/hw/hns/hns_roce_main.c ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table, table 655 drivers/infiniband/hw/hns/hns_roce_main.c ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table, table 755 drivers/infiniband/hw/hns/hns_roce_main.c hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table); table 758 drivers/infiniband/hw/hns/hns_roce_main.c hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); table 181 drivers/infiniband/hw/hns/hns_roce_mr.c struct hns_roce_hem_table *table; table 188 drivers/infiniband/hw/hns/hns_roce_mr.c table = &mr_table->mtt_table; table 192 drivers/infiniband/hw/hns/hns_roce_mr.c table = &mr_table->mtt_cqe_table; table 196 drivers/infiniband/hw/hns/hns_roce_mr.c table = &mr_table->mtt_srqwqe_table; table 200 drivers/infiniband/hw/hns/hns_roce_mr.c table = &mr_table->mtt_idx_table; table 212 drivers/infiniband/hw/hns/hns_roce_mr.c if (hns_roce_table_get_range(hr_dev, table, *seg, table 790 drivers/infiniband/hw/hns/hns_roce_mr.c struct hns_roce_hem_table *table; table 798 drivers/infiniband/hw/hns/hns_roce_mr.c table = &hr_dev->mr_table.mtt_table; table 802 drivers/infiniband/hw/hns/hns_roce_mr.c table = &hr_dev->mr_table.mtt_cqe_table; table 806 drivers/infiniband/hw/hns/hns_roce_mr.c table = &hr_dev->mr_table.mtt_srqwqe_table; table 810 drivers/infiniband/hw/hns/hns_roce_mr.c table = &hr_dev->mr_table.mtt_idx_table; table 825 
drivers/infiniband/hw/hns/hns_roce_mr.c mtts = hns_roce_table_find(hr_dev, table, table 119 drivers/infiniband/hw/hns/hns_roce_srq.c ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn); table 150 drivers/infiniband/hw/hns/hns_roce_srq.c hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); table 174 drivers/infiniband/hw/hns/hns_roce_srq.c hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); table 1377 drivers/infiniband/hw/mlx5/devx.c struct mlx5_devx_event_table *table; table 1382 drivers/infiniband/hw/mlx5/devx.c table = &obj->ib_dev->devx_event_table; table 1384 drivers/infiniband/hw/mlx5/devx.c event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP); table 2368 drivers/infiniband/hw/mlx5/devx.c struct mlx5_devx_event_table *table; table 2381 drivers/infiniband/hw/mlx5/devx.c table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb); table 2382 drivers/infiniband/hw/mlx5/devx.c dev = container_of(table, struct mlx5_ib_dev, devx_event_table); table 2389 drivers/infiniband/hw/mlx5/devx.c event = xa_load(&table->event_xa, event_type | (obj_type << 16)); table 2416 drivers/infiniband/hw/mlx5/devx.c struct mlx5_devx_event_table *table = &dev->devx_event_table; table 2418 drivers/infiniband/hw/mlx5/devx.c xa_init(&table->event_xa); table 2419 drivers/infiniband/hw/mlx5/devx.c mutex_init(&table->event_xa_lock); table 2420 drivers/infiniband/hw/mlx5/devx.c MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY); table 2421 drivers/infiniband/hw/mlx5/devx.c mlx5_eq_notifier_register(dev->mdev, &table->devx_nb); table 2426 drivers/infiniband/hw/mlx5/devx.c struct mlx5_devx_event_table *table = &dev->devx_event_table; table 2432 drivers/infiniband/hw/mlx5/devx.c mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb); table 2434 drivers/infiniband/hw/mlx5/devx.c xa_for_each(&table->event_xa, id, entry) { table 2442 drivers/infiniband/hw/mlx5/devx.c xa_destroy(&table->event_xa); table 83 drivers/infiniband/hw/mlx5/srq_cmd.c struct mlx5_srq_table *table = &dev->srq_table; table 86 drivers/infiniband/hw/mlx5/srq_cmd.c xa_lock(&table->array); table 87 drivers/infiniband/hw/mlx5/srq_cmd.c srq = xa_load(&table->array, srqn); table 90 drivers/infiniband/hw/mlx5/srq_cmd.c xa_unlock(&table->array); table 577 drivers/infiniband/hw/mlx5/srq_cmd.c struct mlx5_srq_table *table = &dev->srq_table; table 598 drivers/infiniband/hw/mlx5/srq_cmd.c err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL)); table 612 drivers/infiniband/hw/mlx5/srq_cmd.c struct mlx5_srq_table *table = &dev->srq_table; table 616 drivers/infiniband/hw/mlx5/srq_cmd.c tmp = xa_erase_irq(&table->array, srq->srqn); table 661 drivers/infiniband/hw/mlx5/srq_cmd.c struct mlx5_srq_table *table; table 670 drivers/infiniband/hw/mlx5/srq_cmd.c table = container_of(nb, struct mlx5_srq_table, nb); table 675 drivers/infiniband/hw/mlx5/srq_cmd.c xa_lock(&table->array); table 676 drivers/infiniband/hw/mlx5/srq_cmd.c srq = xa_load(&table->array, srqn); table 679 drivers/infiniband/hw/mlx5/srq_cmd.c xa_unlock(&table->array); table 693 drivers/infiniband/hw/mlx5/srq_cmd.c struct mlx5_srq_table *table = &dev->srq_table; table 695 drivers/infiniband/hw/mlx5/srq_cmd.c memset(table, 0, sizeof(*table)); table 696 drivers/infiniband/hw/mlx5/srq_cmd.c xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ); table 698 drivers/infiniband/hw/mlx5/srq_cmd.c table->nb.notifier_call = srq_event_notifier; table 699 drivers/infiniband/hw/mlx5/srq_cmd.c mlx5_notifier_register(dev->mdev, &table->nb); table 706 
drivers/infiniband/hw/mlx5/srq_cmd.c struct mlx5_srq_table *table = &dev->srq_table; table 708 drivers/infiniband/hw/mlx5/srq_cmd.c mlx5_notifier_unregister(dev->mdev, &table->nb); table 47 drivers/infiniband/hw/mthca/mthca_allocator.c obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); table 50 drivers/infiniband/hw/mthca/mthca_allocator.c obj = find_first_zero_bit(alloc->table, alloc->max); table 54 drivers/infiniband/hw/mthca/mthca_allocator.c set_bit(obj, alloc->table); table 72 drivers/infiniband/hw/mthca/mthca_allocator.c clear_bit(obj, alloc->table); table 93 drivers/infiniband/hw/mthca/mthca_allocator.c alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), table 95 drivers/infiniband/hw/mthca/mthca_allocator.c if (!alloc->table) table 98 drivers/infiniband/hw/mthca/mthca_allocator.c bitmap_zero(alloc->table, num); table 100 drivers/infiniband/hw/mthca/mthca_allocator.c set_bit(i, alloc->table); table 107 drivers/infiniband/hw/mthca/mthca_allocator.c kfree(alloc->table); table 784 drivers/infiniband/hw/mthca/mthca_cq.c err = mthca_table_get(dev, dev->cq_table.table, cq->cqn); table 879 drivers/infiniband/hw/mthca/mthca_cq.c mthca_table_put(dev, dev->cq_table.table, cq->cqn); table 946 drivers/infiniband/hw/mthca/mthca_cq.c mthca_table_put(dev, dev->cq_table.table, cq->cqn); table 184 drivers/infiniband/hw/mthca/mthca_dev.h unsigned long *table; table 243 drivers/infiniband/hw/mthca/mthca_dev.h struct mthca_icm_table *table; table 250 drivers/infiniband/hw/mthca/mthca_dev.h struct mthca_icm_table *table; table 276 drivers/infiniband/hw/mthca/mthca_dev.h struct mthca_icm_table *table; table 476 drivers/infiniband/hw/mthca/mthca_main.c mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, table 481 drivers/infiniband/hw/mthca/mthca_main.c if (!mdev->cq_table.table) { table 488 drivers/infiniband/hw/mthca/mthca_main.c mdev->srq_table.table = table 494 drivers/infiniband/hw/mthca/mthca_main.c if (!mdev->srq_table.table) { table 507 drivers/infiniband/hw/mthca/mthca_main.c mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base, table 514 drivers/infiniband/hw/mthca/mthca_main.c if (!mdev->mcg_table.table) { table 524 drivers/infiniband/hw/mthca/mthca_main.c mthca_free_icm_table(mdev, mdev->srq_table.table); table 527 drivers/infiniband/hw/mthca/mthca_main.c mthca_free_icm_table(mdev, mdev->cq_table.table); table 559 drivers/infiniband/hw/mthca/mthca_main.c mthca_free_icm_table(mdev, mdev->mcg_table.table); table 561 drivers/infiniband/hw/mthca/mthca_main.c mthca_free_icm_table(mdev, mdev->srq_table.table); table 562 drivers/infiniband/hw/mthca/mthca_main.c mthca_free_icm_table(mdev, mdev->cq_table.table); table 222 drivers/infiniband/hw/mthca/mthca_memfree.c int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) table 224 drivers/infiniband/hw/mthca/mthca_memfree.c int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; table 227 drivers/infiniband/hw/mthca/mthca_memfree.c mutex_lock(&table->mutex); table 229 drivers/infiniband/hw/mthca/mthca_memfree.c if (table->icm[i]) { table 230 drivers/infiniband/hw/mthca/mthca_memfree.c ++table->icm[i]->refcount; table 234 drivers/infiniband/hw/mthca/mthca_memfree.c table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, table 235 drivers/infiniband/hw/mthca/mthca_memfree.c (table->lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | table 236 drivers/infiniband/hw/mthca/mthca_memfree.c __GFP_NOWARN, table->coherent); table 237 drivers/infiniband/hw/mthca/mthca_memfree.c if (!table->icm[i]) { table 242 drivers/infiniband/hw/mthca/mthca_memfree.c if (mthca_MAP_ICM(dev, table->icm[i], table 243 drivers/infiniband/hw/mthca/mthca_memfree.c table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) { table 244 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_free_icm(dev, table->icm[i], table->coherent); table 245 drivers/infiniband/hw/mthca/mthca_memfree.c table->icm[i] = NULL; table 250 drivers/infiniband/hw/mthca/mthca_memfree.c ++table->icm[i]->refcount; table 253 drivers/infiniband/hw/mthca/mthca_memfree.c mutex_unlock(&table->mutex); table 257 drivers/infiniband/hw/mthca/mthca_memfree.c void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) table 264 drivers/infiniband/hw/mthca/mthca_memfree.c i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; table 266 drivers/infiniband/hw/mthca/mthca_memfree.c mutex_lock(&table->mutex); table 268 drivers/infiniband/hw/mthca/mthca_memfree.c if (--table->icm[i]->refcount == 0) { table 269 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, table 271 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_free_icm(dev, table->icm[i], table->coherent); table 272 drivers/infiniband/hw/mthca/mthca_memfree.c table->icm[i] = NULL; table 275 drivers/infiniband/hw/mthca/mthca_memfree.c mutex_unlock(&table->mutex); table 278 drivers/infiniband/hw/mthca/mthca_memfree.c void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle) table 285 drivers/infiniband/hw/mthca/mthca_memfree.c if (!table->lowmem) table 288 drivers/infiniband/hw/mthca/mthca_memfree.c mutex_lock(&table->mutex); table 290 drivers/infiniband/hw/mthca/mthca_memfree.c idx = (obj & (table->num_obj - 1)) * table->obj_size; table 291 drivers/infiniband/hw/mthca/mthca_memfree.c icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE]; table 317 drivers/infiniband/hw/mthca/mthca_memfree.c mutex_unlock(&table->mutex); table 321 drivers/infiniband/hw/mthca/mthca_memfree.c int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table, table 324 drivers/infiniband/hw/mthca/mthca_memfree.c int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size; table 328 drivers/infiniband/hw/mthca/mthca_memfree.c err = mthca_table_get(dev, table, i); table 338 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_table_put(dev, table, i); table 344 drivers/infiniband/hw/mthca/mthca_memfree.c void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, table 352 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size) table 353 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_table_put(dev, table, i); table 361 drivers/infiniband/hw/mthca/mthca_memfree.c struct mthca_icm_table *table; table 370 drivers/infiniband/hw/mthca/mthca_memfree.c table = kmalloc(struct_size(table, icm, num_icm), GFP_KERNEL); table 371 drivers/infiniband/hw/mthca/mthca_memfree.c if (!table) table 374 drivers/infiniband/hw/mthca/mthca_memfree.c table->virt = virt; table 375 drivers/infiniband/hw/mthca/mthca_memfree.c table->num_icm = num_icm; table 376 drivers/infiniband/hw/mthca/mthca_memfree.c table->num_obj = nobj; table 377 drivers/infiniband/hw/mthca/mthca_memfree.c table->obj_size = obj_size; table 378 drivers/infiniband/hw/mthca/mthca_memfree.c 
table->lowmem = use_lowmem; table 379 drivers/infiniband/hw/mthca/mthca_memfree.c table->coherent = use_coherent; table 380 drivers/infiniband/hw/mthca/mthca_memfree.c mutex_init(&table->mutex); table 383 drivers/infiniband/hw/mthca/mthca_memfree.c table->icm[i] = NULL; table 390 drivers/infiniband/hw/mthca/mthca_memfree.c table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT, table 393 drivers/infiniband/hw/mthca/mthca_memfree.c if (!table->icm[i]) table 395 drivers/infiniband/hw/mthca/mthca_memfree.c if (mthca_MAP_ICM(dev, table->icm[i], table 397 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_free_icm(dev, table->icm[i], table->coherent); table 398 drivers/infiniband/hw/mthca/mthca_memfree.c table->icm[i] = NULL; table 406 drivers/infiniband/hw/mthca/mthca_memfree.c ++table->icm[i]->refcount; table 409 drivers/infiniband/hw/mthca/mthca_memfree.c return table; table 413 drivers/infiniband/hw/mthca/mthca_memfree.c if (table->icm[i]) { table 416 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_free_icm(dev, table->icm[i], table->coherent); table 419 drivers/infiniband/hw/mthca/mthca_memfree.c kfree(table); table 424 drivers/infiniband/hw/mthca/mthca_memfree.c void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table) table 428 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < table->num_icm; ++i) table 429 drivers/infiniband/hw/mthca/mthca_memfree.c if (table->icm[i]) { table 431 drivers/infiniband/hw/mthca/mthca_memfree.c table->virt + i * MTHCA_TABLE_CHUNK_SIZE, table 433 drivers/infiniband/hw/mthca/mthca_memfree.c mthca_free_icm(dev, table->icm[i], table->coherent); table 436 drivers/infiniband/hw/mthca/mthca_memfree.c kfree(table); table 90 drivers/infiniband/hw/mthca/mthca_memfree.h void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table); table 91 drivers/infiniband/hw/mthca/mthca_memfree.h int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj); table 92 drivers/infiniband/hw/mthca/mthca_memfree.h void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj); table 93 drivers/infiniband/hw/mthca/mthca_memfree.h void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle); table 94 drivers/infiniband/hw/mthca/mthca_memfree.h int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table, table 96 drivers/infiniband/hw/mthca/mthca_memfree.h void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, table 242 drivers/infiniband/hw/mthca/mthca_srq.c err = mthca_table_get(dev, dev->srq_table.table, srq->srqn); table 319 drivers/infiniband/hw/mthca/mthca_srq.c mthca_table_put(dev, dev->srq_table.table, srq->srqn); table 367 drivers/infiniband/hw/mthca/mthca_srq.c mthca_table_put(dev, dev->srq_table.table, srq->srqn); table 106 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h unsigned long *table; table 66 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL); table 67 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c if (!tbl->table) table 71 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c set_bit(0, tbl->table); table 80 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c kfree(tbl->table); table 92 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last); table 95 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c obj = find_first_zero_bit(tbl->table, tbl->max); table 103 
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c set_bit(obj, tbl->table); table 123 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c clear_bit(obj, tbl->table); table 147 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c u64 *table = pdir->tables[i]; table 149 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c if (table) table 151 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c table, pdir->dir[i]); table 171 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c u64 *table; table 176 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c table = pvrdma_page_dir_table(pdir, idx); table 177 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c table[PVRDMA_PAGE_DIR_PAGE(idx)] = daddr; table 89 drivers/infiniband/sw/rdmavt/mr.c lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table); table 90 drivers/infiniband/sw/rdmavt/mr.c rdi->lkey_table.table = (struct rvt_mregion __rcu **) table 92 drivers/infiniband/sw/rdmavt/mr.c if (!rdi->lkey_table.table) table 97 drivers/infiniband/sw/rdmavt/mr.c RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL); table 115 drivers/infiniband/sw/rdmavt/mr.c vfree(rdi->lkey_table.table); table 208 drivers/infiniband/sw/rdmavt/mr.c if (!rcu_access_pointer(rkt->table[r])) table 232 drivers/infiniband/sw/rdmavt/mr.c rcu_assign_pointer(rkt->table[r], mr); table 271 drivers/infiniband/sw/rdmavt/mr.c rcu_assign_pointer(rkt->table[r], NULL); table 702 drivers/infiniband/sw/rdmavt/mr.c rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]); table 951 drivers/infiniband/sw/rdmavt/mr.c mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]); table 1059 drivers/infiniband/sw/rdmavt/mr.c mr = rcu_dereference(rkt->table[rkey >> rkt->shift]); table 184 drivers/infiniband/sw/rxe/rxe_pool.c pool->table = kmalloc(size, GFP_KERNEL); table 185 drivers/infiniband/sw/rxe/rxe_pool.c if (!pool->table) { table 191 drivers/infiniband/sw/rxe/rxe_pool.c bitmap_zero(pool->table, max - min + 1); table 246 drivers/infiniband/sw/rxe/rxe_pool.c kfree(pool->table); table 273 drivers/infiniband/sw/rxe/rxe_pool.c index = find_next_zero_bit(pool->table, range, pool->last); table 275 drivers/infiniband/sw/rxe/rxe_pool.c index = find_first_zero_bit(pool->table, range); table 278 drivers/infiniband/sw/rxe/rxe_pool.c set_bit(index, pool->table); table 383 drivers/infiniband/sw/rxe/rxe_pool.c clear_bit(elem->index - pool->min_index, pool->table); table 107 drivers/infiniband/sw/rxe/rxe_pool.h unsigned long *table; table 1051 drivers/input/keyboard/applespi.c applespi_find_translation(const struct applespi_key_translation *table, u16 key) table 1055 drivers/input/keyboard/applespi.c for (trans = table; trans->from; trans++) table 3681 drivers/iommu/amd_iommu.c static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) table 3687 drivers/iommu/amd_iommu.c dte |= iommu_virt_to_phys(table->table); table 3697 drivers/iommu/amd_iommu.c struct irq_remap_table *table; table 3703 drivers/iommu/amd_iommu.c table = irq_lookup_table[devid]; table 3704 drivers/iommu/amd_iommu.c if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid)) table 3707 drivers/iommu/amd_iommu.c return table; table 3712 drivers/iommu/amd_iommu.c struct irq_remap_table *table; table 3714 drivers/iommu/amd_iommu.c table = kzalloc(sizeof(*table), GFP_KERNEL); table 3715 drivers/iommu/amd_iommu.c if (!table) table 3718 drivers/iommu/amd_iommu.c table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL); table 3719 drivers/iommu/amd_iommu.c if (!table->table) { table 3720 drivers/iommu/amd_iommu.c kfree(table); table 3723 
drivers/iommu/amd_iommu.c raw_spin_lock_init(&table->lock); table 3726 drivers/iommu/amd_iommu.c memset(table->table, 0, table 3729 drivers/iommu/amd_iommu.c memset(table->table, 0, table 3731 drivers/iommu/amd_iommu.c return table; table 3735 drivers/iommu/amd_iommu.c struct irq_remap_table *table) table 3737 drivers/iommu/amd_iommu.c irq_lookup_table[devid] = table; table 3738 drivers/iommu/amd_iommu.c set_dte_irq_entry(devid, table); table 3745 drivers/iommu/amd_iommu.c struct irq_remap_table *table = data; table 3747 drivers/iommu/amd_iommu.c irq_lookup_table[alias] = table; table 3748 drivers/iommu/amd_iommu.c set_dte_irq_entry(alias, table); table 3757 drivers/iommu/amd_iommu.c struct irq_remap_table *table = NULL; table 3769 drivers/iommu/amd_iommu.c table = irq_lookup_table[devid]; table 3770 drivers/iommu/amd_iommu.c if (table) table 3774 drivers/iommu/amd_iommu.c table = irq_lookup_table[alias]; table 3775 drivers/iommu/amd_iommu.c if (table) { table 3776 drivers/iommu/amd_iommu.c set_remap_table_entry(iommu, devid, table); table 3788 drivers/iommu/amd_iommu.c table = irq_lookup_table[devid]; table 3789 drivers/iommu/amd_iommu.c if (table) table 3792 drivers/iommu/amd_iommu.c table = irq_lookup_table[alias]; table 3793 drivers/iommu/amd_iommu.c if (table) { table 3794 drivers/iommu/amd_iommu.c set_remap_table_entry(iommu, devid, table); table 3798 drivers/iommu/amd_iommu.c table = new_table; table 3803 drivers/iommu/amd_iommu.c table); table 3805 drivers/iommu/amd_iommu.c set_remap_table_entry(iommu, devid, table); table 3808 drivers/iommu/amd_iommu.c set_remap_table_entry(iommu, alias, table); table 3817 drivers/iommu/amd_iommu.c kmem_cache_free(amd_iommu_irq_cache, new_table->table); table 3820 drivers/iommu/amd_iommu.c return table; table 3826 drivers/iommu/amd_iommu.c struct irq_remap_table *table; table 3834 drivers/iommu/amd_iommu.c table = alloc_irq_table(devid, pdev); table 3835 drivers/iommu/amd_iommu.c if (!table) table 3841 drivers/iommu/amd_iommu.c raw_spin_lock_irqsave(&table->lock, flags); table 3844 drivers/iommu/amd_iommu.c for (index = ALIGN(table->min_index, alignment), c = 0; table 3846 drivers/iommu/amd_iommu.c if (!iommu->irte_ops->is_allocated(table, index)) { table 3856 drivers/iommu/amd_iommu.c iommu->irte_ops->set_allocated(table, index - c + 1); table 3868 drivers/iommu/amd_iommu.c raw_spin_unlock_irqrestore(&table->lock, flags); table 3876 drivers/iommu/amd_iommu.c struct irq_remap_table *table; table 3885 drivers/iommu/amd_iommu.c table = get_irq_table(devid); table 3886 drivers/iommu/amd_iommu.c if (!table) table 3889 drivers/iommu/amd_iommu.c raw_spin_lock_irqsave(&table->lock, flags); table 3891 drivers/iommu/amd_iommu.c entry = (struct irte_ga *)table->table; table 3900 drivers/iommu/amd_iommu.c raw_spin_unlock_irqrestore(&table->lock, flags); table 3910 drivers/iommu/amd_iommu.c struct irq_remap_table *table; table 3918 drivers/iommu/amd_iommu.c table = get_irq_table(devid); table 3919 drivers/iommu/amd_iommu.c if (!table) table 3922 drivers/iommu/amd_iommu.c raw_spin_lock_irqsave(&table->lock, flags); table 3923 drivers/iommu/amd_iommu.c table->table[index] = irte->val; table 3924 drivers/iommu/amd_iommu.c raw_spin_unlock_irqrestore(&table->lock, flags); table 3934 drivers/iommu/amd_iommu.c struct irq_remap_table *table; table 3942 drivers/iommu/amd_iommu.c table = get_irq_table(devid); table 3943 drivers/iommu/amd_iommu.c if (!table) table 3946 drivers/iommu/amd_iommu.c raw_spin_lock_irqsave(&table->lock, flags); table 3947 
drivers/iommu/amd_iommu.c iommu->irte_ops->clear_allocated(table, index); table 3948 drivers/iommu/amd_iommu.c raw_spin_unlock_irqrestore(&table->lock, flags); table 4042 drivers/iommu/amd_iommu.c static void irte_set_allocated(struct irq_remap_table *table, int index) table 4044 drivers/iommu/amd_iommu.c table->table[index] = IRTE_ALLOCATED; table 4047 drivers/iommu/amd_iommu.c static void irte_ga_set_allocated(struct irq_remap_table *table, int index) table 4049 drivers/iommu/amd_iommu.c struct irte_ga *ptr = (struct irte_ga *)table->table; table 4057 drivers/iommu/amd_iommu.c static bool irte_is_allocated(struct irq_remap_table *table, int index) table 4059 drivers/iommu/amd_iommu.c union irte *ptr = (union irte *)table->table; table 4065 drivers/iommu/amd_iommu.c static bool irte_ga_is_allocated(struct irq_remap_table *table, int index) table 4067 drivers/iommu/amd_iommu.c struct irte_ga *ptr = (struct irte_ga *)table->table; table 4073 drivers/iommu/amd_iommu.c static void irte_clear_allocated(struct irq_remap_table *table, int index) table 4075 drivers/iommu/amd_iommu.c table->table[index] = 0; table 4078 drivers/iommu/amd_iommu.c static void irte_ga_clear_allocated(struct irq_remap_table *table, int index) table 4080 drivers/iommu/amd_iommu.c struct irte_ga *ptr = (struct irte_ga *)table->table; table 4264 drivers/iommu/amd_iommu.c struct irq_remap_table *table; table 4267 drivers/iommu/amd_iommu.c table = alloc_irq_table(devid, NULL); table 4268 drivers/iommu/amd_iommu.c if (table) { table 4269 drivers/iommu/amd_iommu.c if (!table->min_index) { table 4274 drivers/iommu/amd_iommu.c table->min_index = 32; table 4277 drivers/iommu/amd_iommu.c iommu->irte_ops->set_allocated(table, i); table 4279 drivers/iommu/amd_iommu.c WARN_ON(table->min_index != 32); table 4593 drivers/iommu/amd_iommu.c struct irq_remap_table *table; table 4607 drivers/iommu/amd_iommu.c table = get_irq_table(devid); table 4608 drivers/iommu/amd_iommu.c if (!table) table 4611 drivers/iommu/amd_iommu.c raw_spin_lock_irqsave(&table->lock, flags); table 4624 drivers/iommu/amd_iommu.c raw_spin_unlock_irqrestore(&table->lock, flags); table 538 drivers/iommu/amd_iommu_init.c static int __init check_ivrs_checksum(struct acpi_table_header *table) table 541 drivers/iommu/amd_iommu_init.c u8 checksum = 0, *p = (u8 *)table; table 543 drivers/iommu/amd_iommu_init.c for (i = 0; i < table->length; ++i) table 559 drivers/iommu/amd_iommu_init.c static int __init find_last_devid_acpi(struct acpi_table_header *table) table 561 drivers/iommu/amd_iommu_init.c u8 *p = (u8 *)table, *end = (u8 *)table; table 566 drivers/iommu/amd_iommu_init.c end += table->length; table 1620 drivers/iommu/amd_iommu_init.c static int __init init_iommu_all(struct acpi_table_header *table) table 1622 drivers/iommu/amd_iommu_init.c u8 *p = (u8 *)table, *end = (u8 *)table; table 1627 drivers/iommu/amd_iommu_init.c end += table->length; table 2151 drivers/iommu/amd_iommu_init.c static int __init init_memory_definitions(struct acpi_table_header *table) table 2153 drivers/iommu/amd_iommu_init.c u8 *p = (u8 *)table, *end = (u8 *)table; table 2156 drivers/iommu/amd_iommu_init.c end += table->length; table 415 drivers/iommu/amd_iommu_types.h u32 *table; table 1928 drivers/iommu/intel-iommu.c static inline unsigned long context_get_sm_pds(struct pasid_table *table) table 1932 drivers/iommu/intel-iommu.c max_pde = table->max_pasid >> PASID_PDE_SHIFT; table 1975 drivers/iommu/intel-iommu.c struct pasid_table *table, table 2034 drivers/iommu/intel-iommu.c 
WARN_ON(!table); table 2037 drivers/iommu/intel-iommu.c pds = context_get_sm_pds(table); table 2038 drivers/iommu/intel-iommu.c context->lo = (u64)virt_to_phys(table->table) | table 2124 drivers/iommu/intel-iommu.c struct pasid_table *table; table 2133 drivers/iommu/intel-iommu.c data->table, PCI_BUS_NUM(alias), table 2141 drivers/iommu/intel-iommu.c struct pasid_table *table; table 2149 drivers/iommu/intel-iommu.c table = intel_pasid_get_table(dev); table 2152 drivers/iommu/intel-iommu.c return domain_context_mapping_one(domain, iommu, table, table 2157 drivers/iommu/intel-iommu.c data.table = table; table 74 drivers/iommu/intel-pasid.c list_add(&info->table, &pasid_table->dev); table 82 drivers/iommu/intel-pasid.c list_del(&info->table); table 162 drivers/iommu/intel-pasid.c pasid_table->table = page_address(pages); table 177 drivers/iommu/intel-pasid.c struct pasid_entry *table; table 191 drivers/iommu/intel-pasid.c dir = pasid_table->table; table 194 drivers/iommu/intel-pasid.c table = get_pasid_table_from_pde(&dir[i]); table 195 drivers/iommu/intel-pasid.c free_pgtable_page(table); table 198 drivers/iommu/intel-pasid.c free_pages((unsigned long)pasid_table->table, pasid_table->order); table 237 drivers/iommu/intel-pasid.c dir = pasid_table->table; table 50 drivers/iommu/intel-pasid.h void *table; /* pasid table pointer */ table 107 drivers/iommu/intel_irq_remapping.c struct ir_table *table = iommu->ir_table; table 128 drivers/iommu/intel_irq_remapping.c index = bitmap_find_free_region(table->bitmap, table 240 drivers/iommu/io-pgtable-arm-v7s.c void *table = NULL; table 243 drivers/iommu/io-pgtable-arm-v7s.c table = (void *)__get_free_pages( table 246 drivers/iommu/io-pgtable-arm-v7s.c table = kmem_cache_zalloc(data->l2_tables, gfp); table 247 drivers/iommu/io-pgtable-arm-v7s.c phys = virt_to_phys(table); table 253 drivers/iommu/io-pgtable-arm-v7s.c if (table && !cfg->coherent_walk) { table 254 drivers/iommu/io-pgtable-arm-v7s.c dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); table 266 drivers/iommu/io-pgtable-arm-v7s.c kmemleak_ignore(table); table 267 drivers/iommu/io-pgtable-arm-v7s.c return table; table 274 drivers/iommu/io-pgtable-arm-v7s.c free_pages((unsigned long)table, get_order(size)); table 276 drivers/iommu/io-pgtable-arm-v7s.c kmem_cache_free(data->l2_tables, table); table 280 drivers/iommu/io-pgtable-arm-v7s.c static void __arm_v7s_free_table(void *table, int lvl, table 288 drivers/iommu/io-pgtable-arm-v7s.c dma_unmap_single(dev, __arm_v7s_dma_addr(table), size, table 291 drivers/iommu/io-pgtable-arm-v7s.c free_pages((unsigned long)table, get_order(size)); table 293 drivers/iommu/io-pgtable-arm-v7s.c kmem_cache_free(data->l2_tables, table); table 449 drivers/iommu/io-pgtable-arm-v7s.c static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table, table 456 drivers/iommu/io-pgtable-arm-v7s.c new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE; table 351 drivers/iommu/io-pgtable-arm.c static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, table 358 drivers/iommu/io-pgtable-arm.c new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; table 2318 drivers/irqchip/irq-gic-v3-its.c __le64 *table; table 2330 drivers/irqchip/irq-gic-v3-its.c table = baser->base; table 2333 drivers/irqchip/irq-gic-v3-its.c if (!table[idx]) { table 2343 drivers/irqchip/irq-gic-v3-its.c table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); table 2347 drivers/irqchip/irq-gic-v3-its.c gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); table 247 drivers/leds/leds-lm3532.c 
static int lm3532_get_index(const int table[], int size, int value) table 252 drivers/leds/leds-lm3532.c if (value == table[i]) table 256 drivers/leds/leds-lm3532.c if (value > table[i - 1] && table 257 drivers/leds/leds-lm3532.c value < table[i]) { table 258 drivers/leds/leds-lm3532.c if (value - table[i - 1] < table[i] - value) table 185 drivers/macintosh/mac_hid.c static int mac_hid_toggle_emumouse(struct ctl_table *table, int write, table 189 drivers/macintosh/mac_hid.c int *valp = table->data; table 197 drivers/macintosh/mac_hid.c rc = proc_dointvec(table, write, buffer, lenp, ppos); table 788 drivers/md/dm-cache-metadata.c static LIST_HEAD(table); table 794 drivers/md/dm-cache-metadata.c list_for_each_entry(cmd, &table, list) table 829 drivers/md/dm-cache-metadata.c list_add(&cmd->list, &table); table 840 drivers/md/dm-cache-policy-smq.c struct smq_hash_table table; table 966 drivers/md/dm-cache-policy-smq.c h_insert(&mq->table, e); table 981 drivers/md/dm-cache-policy-smq.c h_insert(&mq->table, e); table 1029 drivers/md/dm-cache-policy-smq.c static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1}; table 1034 drivers/md/dm-cache-policy-smq.c return table[index]; table 1356 drivers/md/dm-cache-policy-smq.c h_exit(&mq->table); table 1374 drivers/md/dm-cache-policy-smq.c e = h_lookup(&mq->table, oblock); table 1483 drivers/md/dm-cache-policy-smq.c h_remove(&mq->table, e); table 1587 drivers/md/dm-cache-policy-smq.c h_remove(&mq->table, e); table 1793 drivers/md/dm-cache-policy-smq.c if (h_init(&mq->table, &mq->es, from_cblock(cache_size))) table 1816 drivers/md/dm-cache-policy-smq.c h_exit(&mq->table); table 715 drivers/md/dm-cache-target.c dm_table_event(cache->ti->table); table 950 drivers/md/dm-cache-target.c return dm_device_name(dm_table_get_md(cache->ti->table)); table 961 drivers/md/dm-cache-target.c dm_table_event(cache->ti->table); table 2496 drivers/md/dm-cache-target.c dm_table_add_target_callbacks(ti->table, &cache->callbacks); table 166 drivers/md/dm-clone-target.c return dm_table_device_name(clone->ti->table); table 197 drivers/md/dm-clone-target.c dm_table_event(clone->ti->table); table 1175 drivers/md/dm-clone-target.c dm_table_event(clone->ti->table); table 1930 drivers/md/dm-clone-target.c dm_table_add_target_callbacks(ti->table, &clone->callbacks); table 849 drivers/md/dm-crypt.c struct mapped_device *md = dm_table_get_md(ti->table); table 2563 drivers/md/dm-crypt.c const char *devname = dm_table_device_name(ti->table); table 2676 drivers/md/dm-crypt.c ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev); table 156 drivers/md/dm-delay.c ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev); table 327 drivers/md/dm-dust.c if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) { table 1518 drivers/md/dm-era-target.c dm_table_add_target_callbacks(ti->table, &era->callbacks); table 246 drivers/md/dm-flakey.c r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev); table 34 drivers/md/dm-init.c struct dm_target_spec *table[DM_MAX_TARGETS]; table 67 drivers/md/dm-init.c kfree(dev->table[i]); table 128 drivers/md/dm-init.c dev->table[n] = sp; table 290 drivers/md/dm-init.c if (dm_early_create(&dev->dmi, dev->table, table 3132 drivers/md/dm-integrity.c struct gendisk *disk = dm_disk(dm_table_get_md(ti->table)); table 3612 drivers/md/dm-integrity.c r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); table 3681 drivers/md/dm-integrity.c dm_table_get_mode(ti->table), 
&ic->meta_dev); table 256 drivers/md/dm-ioctl.c struct dm_table *table; table 266 drivers/md/dm-ioctl.c table = dm_get_live_table(hc->md, &srcu_idx); table 267 drivers/md/dm-ioctl.c if (table) table 268 drivers/md/dm-ioctl.c dm_table_event(table); table 271 drivers/md/dm-ioctl.c table = NULL; table 273 drivers/md/dm-ioctl.c table = hc->new_map; table 277 drivers/md/dm-ioctl.c return table; table 374 drivers/md/dm-ioctl.c struct dm_table *table; table 440 drivers/md/dm-ioctl.c table = dm_get_live_table(hc->md, &srcu_idx); table 441 drivers/md/dm-ioctl.c if (table) table 442 drivers/md/dm-ioctl.c dm_table_event(table); table 685 drivers/md/dm-ioctl.c struct dm_table *table = NULL; table 697 drivers/md/dm-ioctl.c table = hc->new_map; table 702 drivers/md/dm-ioctl.c return table; table 720 drivers/md/dm-ioctl.c struct dm_table *table; table 747 drivers/md/dm-ioctl.c table = dm_get_live_table(md, &srcu_idx); table 748 drivers/md/dm-ioctl.c if (table) { table 752 drivers/md/dm-ioctl.c param->target_count = dm_table_get_num_targets(table); table 761 drivers/md/dm-ioctl.c table = dm_get_inactive_table(md, &srcu_idx); table 762 drivers/md/dm-ioctl.c if (table) { table 763 drivers/md/dm-ioctl.c if (!(dm_table_get_mode(table) & FMODE_WRITE)) table 765 drivers/md/dm-ioctl.c param->target_count = dm_table_get_num_targets(table); table 1136 drivers/md/dm-ioctl.c static void retrieve_status(struct dm_table *table, table 1154 drivers/md/dm-ioctl.c num_targets = dm_table_get_num_targets(table); table 1156 drivers/md/dm-ioctl.c struct dm_target *ti = dm_table_get_target(table, i); table 1214 drivers/md/dm-ioctl.c struct dm_table *table; table 1236 drivers/md/dm-ioctl.c table = dm_get_live_or_inactive_table(md, param, &srcu_idx); table 1237 drivers/md/dm-ioctl.c if (table) table 1238 drivers/md/dm-ioctl.c retrieve_status(table, param, param_size); table 1282 drivers/md/dm-ioctl.c static int populate_table(struct dm_table *table, table 1305 drivers/md/dm-ioctl.c r = dm_table_add_target(table, spec->target_type, table 1317 drivers/md/dm-ioctl.c return dm_table_complete(table); table 1454 drivers/md/dm-ioctl.c static void retrieve_deps(struct dm_table *table, table 1468 drivers/md/dm-ioctl.c list_for_each (tmp, dm_table_get_devices(table)) table 1485 drivers/md/dm-ioctl.c list_for_each_entry (dd, dm_table_get_devices(table), list) table 1494 drivers/md/dm-ioctl.c struct dm_table *table; table 1503 drivers/md/dm-ioctl.c table = dm_get_live_or_inactive_table(md, param, &srcu_idx); table 1504 drivers/md/dm-ioctl.c if (table) table 1505 drivers/md/dm-ioctl.c retrieve_deps(table, param, param_size); table 1520 drivers/md/dm-ioctl.c struct dm_table *table; table 1529 drivers/md/dm-ioctl.c table = dm_get_live_or_inactive_table(md, param, &srcu_idx); table 1530 drivers/md/dm-ioctl.c if (table) table 1531 drivers/md/dm-ioctl.c retrieve_status(table, param, param_size); table 1577 drivers/md/dm-ioctl.c struct dm_table *table; table 1610 drivers/md/dm-ioctl.c table = dm_get_live_table(md, &srcu_idx); table 1611 drivers/md/dm-ioctl.c if (!table) table 1619 drivers/md/dm-ioctl.c ti = dm_table_find_target(table, tmsg->sector); table 54 drivers/md/dm-linear.c ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev); table 166 drivers/md/dm-log-userspace-base.c dm_table_event(lc->ti->table); table 294 drivers/md/dm-log-userspace-base.c dm_table_get_mode(ti->table), &lc->log_dev); table 637 drivers/md/dm-log-userspace-base.c dm_table_event(lc->ti->table); table 557 drivers/md/dm-log-writes.c ret = 
dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev); table 564 drivers/md/dm-log-writes.c ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), table 553 drivers/md/dm-log.c r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev); table 582 drivers/md/dm-log.c dm_table_event(lc->ti->table); table 216 drivers/md/dm-mpath.c dm_table_set_type(ti->table, m->queue_mode); table 439 drivers/md/dm-mpath.c struct mapped_device *md = dm_table_get_md((m)->ti->table); \ table 644 drivers/md/dm-mpath.c dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); table 715 drivers/md/dm-mpath.c dm_table_run_md_queue_async(m->ti->table); table 731 drivers/md/dm-mpath.c dm_table_event(m->ti->table); table 855 drivers/md/dm-mpath.c r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table), table 1292 drivers/md/dm-mpath.c dm_table_run_md_queue_async(m->ti->table); table 1835 drivers/md/dm-mpath.c r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev); table 1885 drivers/md/dm-mpath.c dm_table_run_md_queue_async(m->ti->table); table 697 drivers/md/dm-raid.c struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); table 848 drivers/md/dm-raid.c r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), table 881 drivers/md/dm-raid.c r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), table 1251 drivers/md/dm-raid.c r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), table 1720 drivers/md/dm-raid.c dm_table_event(rs->ti->table); table 3245 drivers/md/dm-raid.c dm_table_add_target_callbacks(ti->table, &rs->callbacks); table 406 drivers/md/dm-raid1.c dm_table_event(ms->ti->table); table 844 drivers/md/dm-raid1.c dm_table_event(ms->ti->table); table 949 drivers/md/dm-raid1.c ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), table 45 drivers/md/dm-snap.c struct hlist_bl_head *table; table 638 drivers/md/dm-snap.c lock->complete_slot = &complete->table[exception_hash(complete, chunk)]; table 639 drivers/md/dm-snap.c lock->pending_slot = &pending->table[exception_hash(pending, chunk)]; table 661 drivers/md/dm-snap.c et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head)); table 662 drivers/md/dm-snap.c if (!et->table) table 666 drivers/md/dm-snap.c INIT_HLIST_BL_HEAD(et->table + i); table 681 drivers/md/dm-snap.c slot = et->table + i; table 687 drivers/md/dm-snap.c vfree(et->table); table 711 drivers/md/dm-snap.c slot = &et->table[exception_hash(et, chunk)]; table 763 drivers/md/dm-snap.c l = &eh->table[exception_hash(eh, new_e->old_chunk)]; table 1288 drivers/md/dm-snap.c r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow); table 1626 drivers/md/dm-snap.c dm_table_event(s->ti->table); table 2231 drivers/md/dm-snap.c origin_md = dm_table_get_md(o->ti->table); table 2235 drivers/md/dm-snap.c origin_md = dm_table_get_md(snap_merging->ti->table); table 2237 drivers/md/dm-snap.c if (origin_md == dm_table_get_md(ti->table)) table 2441 drivers/md/dm-snap.c if (sector >= dm_table_get_size(snap->ti->table)) table 2625 drivers/md/dm-snap.c r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev); table 55 drivers/md/dm-stripe.c dm_table_event(sc->ti->table); table 84 drivers/md/dm-stripe.c ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), table 210 drivers/md/dm-switch.c r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table), table 300 drivers/md/dm-table.c dm_device_name(ti->table->md), bdevname(bdev, b), table 313 drivers/md/dm-table.c 
dm_device_name(ti->table->md), bdevname(bdev, b), table 329 drivers/md/dm-table.c dm_device_name(ti->table->md), table 346 drivers/md/dm-table.c dm_device_name(ti->table->md), table 359 drivers/md/dm-table.c dm_device_name(ti->table->md), table 368 drivers/md/dm-table.c dm_device_name(ti->table->md), table 432 drivers/md/dm-table.c struct dm_table *t = ti->table; table 477 drivers/md/dm-table.c dm_device_name(ti->table->md), bdevname(bdev, b)); table 485 drivers/md/dm-table.c dm_device_name(ti->table->md), bdevname(bdev, b), table 502 drivers/md/dm-table.c struct list_head *devices = &ti->table->devices; table 513 drivers/md/dm-table.c dm_device_name(ti->table->md), d->name); table 517 drivers/md/dm-table.c dm_put_table_device(ti->table->md, d); table 527 drivers/md/dm-table.c static int adjoin(struct dm_table *table, struct dm_target *ti) table 531 drivers/md/dm-table.c if (!table->num_targets) table 534 drivers/md/dm-table.c prev = &table->targets[table->num_targets - 1]; table 641 drivers/md/dm-table.c static int validate_hardware_logical_block_alignment(struct dm_table *table, table 669 drivers/md/dm-table.c for (i = 0; i < dm_table_get_num_targets(table); i++) { table 670 drivers/md/dm-table.c ti = dm_table_get_target(table, i); table 698 drivers/md/dm-table.c dm_device_name(table->md), i, table 766 drivers/md/dm-table.c tgt->table = t; table 1399 drivers/md/dm-table.c bool dm_table_has_no_data_devices(struct dm_table *table) table 1404 drivers/md/dm-table.c for (i = 0; i < dm_table_get_num_targets(table); i++) { table 1405 drivers/md/dm-table.c ti = dm_table_get_target(table, i); table 1475 drivers/md/dm-table.c static int validate_hardware_zoned_model(struct dm_table *table, table 1482 drivers/md/dm-table.c if (!dm_table_supports_zoned_model(table, zoned_model)) { table 1484 drivers/md/dm-table.c dm_device_name(table->md)); table 1492 drivers/md/dm-table.c if (!dm_table_matches_zone_sectors(table, zone_sectors)) { table 1494 drivers/md/dm-table.c dm_device_name(table->md)); table 1504 drivers/md/dm-table.c int dm_calculate_queue_limits(struct dm_table *table, table 1515 drivers/md/dm-table.c for (i = 0; i < dm_table_get_num_targets(table); i++) { table 1518 drivers/md/dm-table.c ti = dm_table_get_target(table, i); table 1559 drivers/md/dm-table.c dm_device_name(table->md), table 1595 drivers/md/dm-table.c if (validate_hardware_zoned_model(table, zoned_model, zone_sectors)) table 1598 drivers/md/dm-table.c return validate_hardware_logical_block_alignment(table, limits); table 313 drivers/md/dm-thin.c dm_table_event(pool->ti->table); table 1523 drivers/md/dm-thin.c dm_table_event(pool->ti->table); table 3210 drivers/md/dm-thin.c dm_table_event(pool->ti->table); table 3379 drivers/md/dm-thin.c pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev, table 3432 drivers/md/dm-thin.c dm_table_add_target_callbacks(ti->table, &pt->callbacks); table 4214 drivers/md/dm-thin.c tc->thin_md = dm_table_get_md(ti->table); table 4236 drivers/md/dm-thin.c r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev); table 173 drivers/md/dm-uevent.c struct mapped_device *md = dm_table_get_md(ti->table); table 76 drivers/md/dm-unstripe.c if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &uc->dev)) { table 218 drivers/md/dm-verity-target.c struct mapped_device *md = dm_table_get_md(v->ti->table); table 971 drivers/md/dm-verity-target.c if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) { table 1982 drivers/md/dm-writecache.c r = dm_get_device(ti, string, 
dm_table_get_mode(ti->table), &wc->dev); table 1995 drivers/md/dm-writecache.c r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev); table 691 drivers/md/dm-zoned-target.c ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev); table 2496 drivers/md/dm.c struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) table 2514 drivers/md/dm.c if (dm_table_has_no_data_devices(table)) { table 2522 drivers/md/dm.c r = dm_calculate_queue_limits(table, &limits); table 2529 drivers/md/dm.c map = __bind(md, table, &limits); table 2977 drivers/md/dm.c return dm_suspended_md(dm_table_get_md(ti->table)); table 2983 drivers/md/dm.c return __noflush_suspending(dm_table_get_md(ti->table)); table 3058 drivers/md/dm.c struct dm_table *table; table 3062 drivers/md/dm.c table = dm_get_live_table(md, &srcu_idx); table 3063 drivers/md/dm.c if (!table || !dm_table_get_size(table)) table 3067 drivers/md/dm.c if (dm_table_get_num_targets(table) != 1) table 3069 drivers/md/dm.c ti = dm_table_get_target(table, 0); table 56 drivers/md/dm.h bool dm_table_has_no_data_devices(struct dm_table *table); table 57 drivers/md/dm.h int dm_calculate_queue_limits(struct dm_table *table, table 187 drivers/media/common/siano/smsdvb-main.c #define convert_from_table(value, table, defval) ({ \ table 189 drivers/media/common/siano/smsdvb-main.c if (value < ARRAY_SIZE(table)) \ table 190 drivers/media/common/siano/smsdvb-main.c __ret = table[value]; \ table 610 drivers/media/dvb-frontends/stv0900_core.c if (INRANGE(lookup->table[imin].regval, agc_gain, table 611 drivers/media/dvb-frontends/stv0900_core.c lookup->table[imax].regval)) { table 615 drivers/media/dvb-frontends/stv0900_core.c if (INRANGE(lookup->table[imin].regval, table 617 drivers/media/dvb-frontends/stv0900_core.c lookup->table[i].regval)) table 623 drivers/media/dvb-frontends/stv0900_core.c rf_lvl = (s32)agc_gain - lookup->table[imin].regval; table 624 drivers/media/dvb-frontends/stv0900_core.c rf_lvl *= (lookup->table[imax].realval - table 625 drivers/media/dvb-frontends/stv0900_core.c lookup->table[imin].realval); table 626 drivers/media/dvb-frontends/stv0900_core.c rf_lvl /= (lookup->table[imax].regval - table 627 drivers/media/dvb-frontends/stv0900_core.c lookup->table[imin].regval); table 628 drivers/media/dvb-frontends/stv0900_core.c rf_lvl += lookup->table[imin].realval; table 629 drivers/media/dvb-frontends/stv0900_core.c } else if (agc_gain > lookup->table[0].regval) table 631 drivers/media/dvb-frontends/stv0900_core.c else if (agc_gain < lookup->table[lookup->size-1].regval) table 698 drivers/media/dvb-frontends/stv0900_core.c if (INRANGE(lookup->table[imin].regval, table 700 drivers/media/dvb-frontends/stv0900_core.c lookup->table[imax].regval)) { table 703 drivers/media/dvb-frontends/stv0900_core.c if (INRANGE(lookup->table[imin].regval, table 705 drivers/media/dvb-frontends/stv0900_core.c lookup->table[i].regval)) table 711 drivers/media/dvb-frontends/stv0900_core.c c_n = ((regval - lookup->table[imin].regval) table 712 drivers/media/dvb-frontends/stv0900_core.c * (lookup->table[imax].realval table 713 drivers/media/dvb-frontends/stv0900_core.c - lookup->table[imin].realval) table 714 drivers/media/dvb-frontends/stv0900_core.c / (lookup->table[imax].regval table 715 drivers/media/dvb-frontends/stv0900_core.c - lookup->table[imin].regval)) table 716 drivers/media/dvb-frontends/stv0900_core.c + lookup->table[imin].realval; table 717 drivers/media/dvb-frontends/stv0900_core.c } else if (regval < 
lookup->table[imin].regval) table 54 drivers/media/dvb-frontends/stv0900_priv.h struct stv000_lookpoint table[STV0900_MAXLOOKUPSIZE];/* Lookup table */ table 604 drivers/media/dvb-frontends/stv0910.c static s32 table_lookup(const struct slookup *table, table 614 drivers/media/dvb-frontends/stv0910.c if (reg_value >= table[0].reg_value) { table 615 drivers/media/dvb-frontends/stv0910.c value = table[0].value; table 616 drivers/media/dvb-frontends/stv0910.c } else if (reg_value <= table[imax].reg_value) { table 617 drivers/media/dvb-frontends/stv0910.c value = table[imax].value; table 621 drivers/media/dvb-frontends/stv0910.c if ((table[imin].reg_value >= reg_value) && table 622 drivers/media/dvb-frontends/stv0910.c (reg_value >= table[i].reg_value)) table 628 drivers/media/dvb-frontends/stv0910.c reg_diff = table[imax].reg_value - table[imin].reg_value; table 629 drivers/media/dvb-frontends/stv0910.c value = table[imin].value; table 631 drivers/media/dvb-frontends/stv0910.c value += ((s32)(reg_value - table[imin].reg_value) * table 632 drivers/media/dvb-frontends/stv0910.c (s32)(table[imax].value table 633 drivers/media/dvb-frontends/stv0910.c - table[imin].value)) table 541 drivers/media/dvb-frontends/stv6111.c static s32 table_lookup(const struct slookup *table, table 551 drivers/media/dvb-frontends/stv6111.c if (reg_value <= table[0].reg_value) { table 552 drivers/media/dvb-frontends/stv6111.c gain = table[0].value; table 553 drivers/media/dvb-frontends/stv6111.c } else if (reg_value >= table[imax].reg_value) { table 554 drivers/media/dvb-frontends/stv6111.c gain = table[imax].value; table 558 drivers/media/dvb-frontends/stv6111.c if ((table[imin].reg_value <= reg_value) && table 559 drivers/media/dvb-frontends/stv6111.c (reg_value <= table[i].reg_value)) table 564 drivers/media/dvb-frontends/stv6111.c reg_diff = table[imax].reg_value - table[imin].reg_value; table 565 drivers/media/dvb-frontends/stv6111.c gain = table[imin].value; table 567 drivers/media/dvb-frontends/stv6111.c gain += ((s32)(reg_value - table[imin].reg_value) * table 568 drivers/media/dvb-frontends/stv6111.c (s32)(table[imax].value table 569 drivers/media/dvb-frontends/stv6111.c - table[imin].value)) / reg_diff; table 687 drivers/media/i2c/imx214.c const struct reg_8 table[]) table 693 drivers/media/i2c/imx214.c for (; table->addr != IMX214_TABLE_END ; table++) { table 694 drivers/media/i2c/imx214.c if (table->addr == IMX214_TABLE_WAIT_MS) { table 695 drivers/media/i2c/imx214.c usleep_range(table->val * 1000, table 696 drivers/media/i2c/imx214.c table->val * 1000 + 500); table 701 drivers/media/i2c/imx214.c if (table[i].addr != (table[0].addr + i)) table 703 drivers/media/i2c/imx214.c vals[i] = table[i].val; table 706 drivers/media/i2c/imx214.c ret = regmap_bulk_write(imx214->regmap, table->addr, vals, i); table 713 drivers/media/i2c/imx214.c table += i - 1; table 567 drivers/media/i2c/imx274.c static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[]) table 579 drivers/media/i2c/imx274.c for (next = table;; next++) { table 208 drivers/media/i2c/ks0127.c u8 *table = reg_defaults; table 214 drivers/media/i2c/ks0127.c table[KS_CMDA] = 0x2c; /* VSE=0, CCIR 601, autodetect standard */ table 215 drivers/media/i2c/ks0127.c table[KS_CMDB] = 0x12; /* VALIGN=0, AGC control and input */ table 216 drivers/media/i2c/ks0127.c table[KS_CMDC] = 0x00; /* Test options */ table 218 drivers/media/i2c/ks0127.c table[KS_CMDD] = 0x01; table 219 drivers/media/i2c/ks0127.c table[KS_HAVB] = 0x00; /* HAV Start Control */ 
table 220 drivers/media/i2c/ks0127.c table[KS_HAVE] = 0x00; /* HAV End Control */ table 221 drivers/media/i2c/ks0127.c table[KS_HS1B] = 0x10; /* HS1 Start Control */ table 222 drivers/media/i2c/ks0127.c table[KS_HS1E] = 0x00; /* HS1 End Control */ table 223 drivers/media/i2c/ks0127.c table[KS_HS2B] = 0x00; /* HS2 Start Control */ table 224 drivers/media/i2c/ks0127.c table[KS_HS2E] = 0x00; /* HS2 End Control */ table 225 drivers/media/i2c/ks0127.c table[KS_AGC] = 0x53; /* Manual setting for AGC */ table 226 drivers/media/i2c/ks0127.c table[KS_HXTRA] = 0x00; /* Extra Bits for HAV and HS1/2 */ table 227 drivers/media/i2c/ks0127.c table[KS_CDEM] = 0x00; /* Chroma Demodulation Control */ table 228 drivers/media/i2c/ks0127.c table[KS_PORTAB] = 0x0f; /* port B is input, port A output GPPORT */ table 229 drivers/media/i2c/ks0127.c table[KS_LUMA] = 0x01; /* Luma control */ table 230 drivers/media/i2c/ks0127.c table[KS_CON] = 0x00; /* Contrast Control */ table 231 drivers/media/i2c/ks0127.c table[KS_BRT] = 0x00; /* Brightness Control */ table 232 drivers/media/i2c/ks0127.c table[KS_CHROMA] = 0x2a; /* Chroma control A */ table 233 drivers/media/i2c/ks0127.c table[KS_CHROMB] = 0x90; /* Chroma control B */ table 234 drivers/media/i2c/ks0127.c table[KS_DEMOD] = 0x00; /* Chroma Demodulation Control & Status */ table 235 drivers/media/i2c/ks0127.c table[KS_SAT] = 0x00; /* Color Saturation Control*/ table 236 drivers/media/i2c/ks0127.c table[KS_HUE] = 0x00; /* Hue Control */ table 237 drivers/media/i2c/ks0127.c table[KS_VERTIA] = 0x00; /* Vertical Processing Control A */ table 239 drivers/media/i2c/ks0127.c table[KS_VERTIB] = 0x12; table 240 drivers/media/i2c/ks0127.c table[KS_VERTIC] = 0x0b; /* Vertical Processing Control C */ table 241 drivers/media/i2c/ks0127.c table[KS_HSCLL] = 0x00; /* Horizontal Scaling Ratio Low */ table 242 drivers/media/i2c/ks0127.c table[KS_HSCLH] = 0x00; /* Horizontal Scaling Ratio High */ table 243 drivers/media/i2c/ks0127.c table[KS_VSCLL] = 0x00; /* Vertical Scaling Ratio Low */ table 244 drivers/media/i2c/ks0127.c table[KS_VSCLH] = 0x00; /* Vertical Scaling Ratio High */ table 246 drivers/media/i2c/ks0127.c table[KS_OFMTA] = 0x30; table 247 drivers/media/i2c/ks0127.c table[KS_OFMTB] = 0x00; /* Output Control B */ table 249 drivers/media/i2c/ks0127.c table[KS_VBICTL] = 0x5d; table 250 drivers/media/i2c/ks0127.c table[KS_CCDAT2] = 0x00; /* Read Only register */ table 251 drivers/media/i2c/ks0127.c table[KS_CCDAT1] = 0x00; /* Read Only register */ table 252 drivers/media/i2c/ks0127.c table[KS_VBIL30] = 0xa8; /* VBI data decoding options */ table 253 drivers/media/i2c/ks0127.c table[KS_VBIL74] = 0xaa; /* VBI data decoding options */ table 254 drivers/media/i2c/ks0127.c table[KS_VBIL118] = 0x2a; /* VBI data decoding options */ table 255 drivers/media/i2c/ks0127.c table[KS_VBIL1512] = 0x00; /* VBI data decoding options */ table 256 drivers/media/i2c/ks0127.c table[KS_TTFRAM] = 0x00; /* Teletext frame alignment pattern */ table 257 drivers/media/i2c/ks0127.c table[KS_TESTA] = 0x00; /* test register, shouldn't be written */ table 258 drivers/media/i2c/ks0127.c table[KS_UVOFFH] = 0x00; /* UV Offset Adjustment High */ table 259 drivers/media/i2c/ks0127.c table[KS_UVOFFL] = 0x00; /* UV Offset Adjustment Low */ table 260 drivers/media/i2c/ks0127.c table[KS_UGAIN] = 0x00; /* U Component Gain Adjustment */ table 261 drivers/media/i2c/ks0127.c table[KS_VGAIN] = 0x00; /* V Component Gain Adjustment */ table 262 drivers/media/i2c/ks0127.c table[KS_VAVB] = 0x07; /* VAV Begin */ table 263 
drivers/media/i2c/ks0127.c table[KS_VAVE] = 0x00; /* VAV End */ table 264 drivers/media/i2c/ks0127.c table[KS_CTRACK] = 0x00; /* Chroma Tracking Control */ table 265 drivers/media/i2c/ks0127.c table[KS_POLCTL] = 0x41; /* Timing Signal Polarity Control */ table 266 drivers/media/i2c/ks0127.c table[KS_REFCOD] = 0x80; /* Reference Code Insertion Control */ table 267 drivers/media/i2c/ks0127.c table[KS_INVALY] = 0x10; /* Invalid Y Code */ table 268 drivers/media/i2c/ks0127.c table[KS_INVALU] = 0x80; /* Invalid U Code */ table 269 drivers/media/i2c/ks0127.c table[KS_INVALV] = 0x80; /* Invalid V Code */ table 270 drivers/media/i2c/ks0127.c table[KS_UNUSEY] = 0x10; /* Unused Y Code */ table 271 drivers/media/i2c/ks0127.c table[KS_UNUSEU] = 0x80; /* Unused U Code */ table 272 drivers/media/i2c/ks0127.c table[KS_UNUSEV] = 0x80; /* Unused V Code */ table 273 drivers/media/i2c/ks0127.c table[KS_USRSAV] = 0x00; /* reserved */ table 274 drivers/media/i2c/ks0127.c table[KS_USREAV] = 0x00; /* reserved */ table 275 drivers/media/i2c/ks0127.c table[KS_SHS1A] = 0x00; /* User Defined SHS1 A */ table 277 drivers/media/i2c/ks0127.c table[KS_SHS1B] = 0x80; table 278 drivers/media/i2c/ks0127.c table[KS_SHS1C] = 0x00; /* User Defined SHS1 C */ table 279 drivers/media/i2c/ks0127.c table[KS_CMDE] = 0x00; /* Command Register E */ table 280 drivers/media/i2c/ks0127.c table[KS_VSDEL] = 0x00; /* VS Delay Control */ table 283 drivers/media/i2c/ks0127.c table[KS_CMDF] = 0x02; table 359 drivers/media/i2c/ks0127.c u8 *table = reg_defaults; table 369 drivers/media/i2c/ks0127.c ks0127_write(sd, i, table[i]); table 372 drivers/media/i2c/ks0127.c ks0127_write(sd, i, table[i]); table 375 drivers/media/i2c/ks0127.c ks0127_write(sd, i, table[i]); table 378 drivers/media/i2c/ks0127.c ks0127_write(sd, i, table[i]); table 577 drivers/media/pci/pt1/pt1.c static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table) table 582 drivers/media/pci/pt1/pt1.c pt1_cleanup_buffer(pt1, &table->bufs[i]); table 584 drivers/media/pci/pt1/pt1.c pt1_free_page(pt1, table->page, table->addr); table 588 drivers/media/pci/pt1/pt1.c pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp) table 600 drivers/media/pci/pt1/pt1.c ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn); table 608 drivers/media/pci/pt1/pt1.c table->page = page; table 609 drivers/media/pci/pt1/pt1.c table->addr = addr; table 614 drivers/media/pci/pt1/pt1.c pt1_cleanup_buffer(pt1, &table->bufs[i]); table 379 drivers/media/platform/aspeed-video.c static void aspeed_video_init_jpeg_table(u32 *table, bool yuv420) table 386 drivers/media/platform/aspeed-video.c memcpy(&table[base], aspeed_video_jpeg_header, table 390 drivers/media/platform/aspeed-video.c memcpy(&table[base], aspeed_video_jpeg_dct[i], table 394 drivers/media/platform/aspeed-video.c memcpy(&table[base], aspeed_video_jpeg_quant, table 398 drivers/media/platform/aspeed-video.c table[base + 2] = 0x00220103; table 392 drivers/media/platform/davinci/isif.c regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 1); table 394 drivers/media/platform/davinci/isif.c regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 0); table 422 drivers/media/platform/davinci/isif.c regw(vdfc->table[0].pos_vert, DFCMEM0); table 423 drivers/media/platform/davinci/isif.c regw(vdfc->table[0].pos_horz, DFCMEM1); table 426 drivers/media/platform/davinci/isif.c regw(vdfc->table[0].level_at_pos, DFCMEM2); table 427 drivers/media/platform/davinci/isif.c regw(vdfc->table[0].level_up_pixels, DFCMEM3); table 428 
drivers/media/platform/davinci/isif.c regw(vdfc->table[0].level_low_pixels, DFCMEM4); table 445 drivers/media/platform/davinci/isif.c regw(vdfc->table[i].pos_vert, DFCMEM0); table 446 drivers/media/platform/davinci/isif.c regw(vdfc->table[i].pos_horz, DFCMEM1); table 449 drivers/media/platform/davinci/isif.c regw(vdfc->table[i].level_at_pos, DFCMEM2); table 450 drivers/media/platform/davinci/isif.c regw(vdfc->table[i].level_up_pixels, DFCMEM3); table 451 drivers/media/platform/davinci/isif.c regw(vdfc->table[i].level_low_pixels, DFCMEM4); table 323 drivers/media/platform/omap3isp/ispccdc.c ccdc_lsc_program_table(ccdc, req->table.dma); table 358 drivers/media/platform/omap3isp/ispccdc.c if (req->table.addr) { table 359 drivers/media/platform/omap3isp/ispccdc.c sg_free_table(&req->table.sgt); table 360 drivers/media/platform/omap3isp/ispccdc.c dma_free_coherent(isp->dev, req->config.size, req->table.addr, table 361 drivers/media/platform/omap3isp/ispccdc.c req->table.dma); table 435 drivers/media/platform/omap3isp/ispccdc.c req->table.addr = dma_alloc_coherent(isp->dev, req->config.size, table 436 drivers/media/platform/omap3isp/ispccdc.c &req->table.dma, table 438 drivers/media/platform/omap3isp/ispccdc.c if (req->table.addr == NULL) { table 443 drivers/media/platform/omap3isp/ispccdc.c ret = dma_get_sgtable(isp->dev, &req->table.sgt, table 444 drivers/media/platform/omap3isp/ispccdc.c req->table.addr, req->table.dma, table 449 drivers/media/platform/omap3isp/ispccdc.c dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl, table 450 drivers/media/platform/omap3isp/ispccdc.c req->table.sgt.nents, DMA_TO_DEVICE); table 452 drivers/media/platform/omap3isp/ispccdc.c if (copy_from_user(req->table.addr, config->lsc, table 458 drivers/media/platform/omap3isp/ispccdc.c dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl, table 459 drivers/media/platform/omap3isp/ispccdc.c req->table.sgt.nents, DMA_TO_DEVICE); table 58 drivers/media/platform/omap3isp/ispccdc.h } table; table 159 drivers/media/platform/omap3isp/isppreview.c isp_reg_writel(isp, yt->table[i], table 257 drivers/media/platform/omap3isp/isppreview.c const __u32 *block = cfa->table[order[i]]; table 526 drivers/media/platform/omap3isp/isppreview.c isp_reg_writel(isp, nf->table[i], table 1316 drivers/media/platform/omap3isp/isppreview.c memcpy(params->cfa.table, cfa_coef_table, table 1317 drivers/media/platform/omap3isp/isppreview.c sizeof(params->cfa.table)); table 1323 drivers/media/platform/omap3isp/isppreview.c memcpy(params->luma.table, luma_enhance_table, table 1324 drivers/media/platform/omap3isp/isppreview.c sizeof(params->luma.table)); table 1326 drivers/media/platform/omap3isp/isppreview.c memcpy(params->nf.table, noise_filter_table, sizeof(params->nf.table)); table 393 drivers/media/platform/qcom/venus/helpers.c const struct freq_tbl *table = core->res->freq_tbl; table 395 drivers/media/platform/qcom/venus/helpers.c unsigned long freq = table[0].freq; table 410 drivers/media/platform/qcom/venus/helpers.c freq = table[num_rows - 1].freq; table 415 drivers/media/platform/qcom/venus/helpers.c if (mbs_per_sec > table[i].load) table 417 drivers/media/platform/qcom/venus/helpers.c freq = table[i].freq; table 927 drivers/media/radio/si4713/si4713.c unsigned long **table, int *size) table 1024 drivers/media/radio/si4713/si4713.c *table = limiter_times; table 1029 drivers/media/radio/si4713/si4713.c *table = acomp_rtimes; table 1034 drivers/media/radio/si4713/si4713.c *table = preemphasis_values; table 1111 drivers/media/radio/si4713/si4713.c 
unsigned long *table = NULL; table 1184 drivers/media/radio/si4713/si4713.c &mask, &property, &mul, &table, &size); table 1191 drivers/media/radio/si4713/si4713.c } else if (table) { table 1192 drivers/media/radio/si4713/si4713.c ret = usecs_to_dev(val, table, size); table 1127 drivers/media/usb/dvb-usb-v2/af9015.c const struct af9015_rc_setup *table) table 1129 drivers/media/usb/dvb-usb-v2/af9015.c for (; table->rc_codes; table++) table 1130 drivers/media/usb/dvb-usb-v2/af9015.c if (table->id == id) table 1131 drivers/media/usb/dvb-usb-v2/af9015.c return table->rc_codes; table 513 drivers/media/usb/gspca/spca561.c int table[] = { 0, 450, 550, 625, EXPOSURE_MAX }; table 515 drivers/media/usb/gspca/spca561.c for (i = 0; i < ARRAY_SIZE(table) - 1; i++) { table 516 drivers/media/usb/gspca/spca561.c if (val <= table[i + 1]) { table 517 drivers/media/usb/gspca/spca561.c expo = val - table[i]; table 32 drivers/mfd/intel_soc_pmic_core.c .table = { table 1143 drivers/mfd/sm501.c lookup = devm_kzalloc(&pdev->dev, struct_size(lookup, table, 3), table 1150 drivers/mfd/sm501.c lookup->table[0].chip_label = "SM501-LOW"; table 1152 drivers/mfd/sm501.c lookup->table[0].chip_label = "SM501-HIGH"; table 1153 drivers/mfd/sm501.c lookup->table[0].chip_hwnum = iic->pin_sda % 32; table 1154 drivers/mfd/sm501.c lookup->table[0].con_id = NULL; table 1155 drivers/mfd/sm501.c lookup->table[0].idx = 0; table 1156 drivers/mfd/sm501.c lookup->table[0].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN; table 1158 drivers/mfd/sm501.c lookup->table[1].chip_label = "SM501-LOW"; table 1160 drivers/mfd/sm501.c lookup->table[1].chip_label = "SM501-HIGH"; table 1161 drivers/mfd/sm501.c lookup->table[1].chip_hwnum = iic->pin_scl % 32; table 1162 drivers/mfd/sm501.c lookup->table[1].con_id = NULL; table 1163 drivers/mfd/sm501.c lookup->table[1].idx = 1; table 1164 drivers/mfd/sm501.c lookup->table[1].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN; table 64 drivers/misc/eeprom/digsy_mtc_eeprom.c .table = { table 139 drivers/misc/fastrpc.c struct sg_table *table; table 214 drivers/misc/fastrpc.c if (map->table) { table 215 drivers/misc/fastrpc.c dma_buf_unmap_attachment(map->attach, map->table, table 491 drivers/misc/fastrpc.c struct sg_table *table; table 493 drivers/misc/fastrpc.c table = &a->sgt; table 495 drivers/misc/fastrpc.c if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir)) table 498 drivers/misc/fastrpc.c return table; table 502 drivers/misc/fastrpc.c struct sg_table *table, table 505 drivers/misc/fastrpc.c dma_unmap_sg(attach->dev, table->sgl, table->nents, dir); table 623 drivers/misc/fastrpc.c map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL); table 624 drivers/misc/fastrpc.c if (IS_ERR(map->table)) { table 625 drivers/misc/fastrpc.c err = PTR_ERR(map->table); table 629 drivers/misc/fastrpc.c map->phys = sg_dma_address(map->table->sgl); table 632 drivers/misc/fastrpc.c map->va = sg_virt(map->table->sgl); table 1380 drivers/misc/mic/scif/scif_api.c struct poll_wqueues table; table 1387 drivers/misc/mic/scif/scif_api.c poll_initwait(&table); table 1388 drivers/misc/mic/scif/scif_api.c pt = &table.pt; table 1403 drivers/misc/mic/scif/scif_api.c count = table.error; table 1413 drivers/misc/mic/scif/scif_api.c poll_freewait(&table); table 149 drivers/mmc/core/quirks.h const struct mmc_fixup *table) table 154 drivers/mmc/core/quirks.h for (f = table; f->vendor_fixup; f++) { table 4997 drivers/mtd/nand/raw/nand_base.c struct nand_flash_dev *table) table 5032 drivers/mtd/nand/raw/nand_base.c ret = 
nand_detect(chip, table); table 846 drivers/mtd/nand/spi/core.c const struct spinand_info *table, table 853 drivers/mtd/nand/spi/core.c const struct spinand_info *info = &table[i]; table 859 drivers/mtd/nand/spi/core.c nand->memorg = table[i].memorg; table 860 drivers/mtd/nand/spi/core.c nand->eccreq = table[i].eccreq; table 861 drivers/mtd/nand/spi/core.c spinand->eccinfo = table[i].eccinfo; table 862 drivers/mtd/nand/spi/core.c spinand->flags = table[i].flags; table 863 drivers/mtd/nand/spi/core.c spinand->select_target = table[i].select_target; table 219 drivers/mtd/nftlmount.c The new DiskOnChip driver already scanned the bad block table. Just query it. table 541 drivers/mtd/spi-nor/spi-nor.c static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size) table 546 drivers/mtd/spi-nor/spi-nor.c if (table[i][0] == opcode) table 547 drivers/mtd/spi-nor/spi-nor.c return table[i][1]; table 2800 drivers/mtd/spi-nor/spi-nor.c static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size) table 2805 drivers/mtd/spi-nor/spi-nor.c if (table[i][0] == (int)hwcaps) table 2806 drivers/mtd/spi-nor/spi-nor.c return table[i][1]; table 261 drivers/net/dsa/lantiq_gswip.c u16 table; // PCE_TBL_CTRL.ADDR = pData->table table 520 drivers/net/dsa/lantiq_gswip.c tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS, table 562 drivers/net/dsa/lantiq_gswip.c tbl->table | addr_mode, table 573 drivers/net/dsa/lantiq_gswip.c tbl->table | addr_mode, table 612 drivers/net/dsa/lantiq_gswip.c vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; table 626 drivers/net/dsa/lantiq_gswip.c vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; table 874 drivers/net/dsa/lantiq_gswip.c vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; table 898 drivers/net/dsa/lantiq_gswip.c vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; table 937 drivers/net/dsa/lantiq_gswip.c vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; table 943 drivers/net/dsa/lantiq_gswip.c vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; table 1005 drivers/net/dsa/lantiq_gswip.c vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; table 1011 drivers/net/dsa/lantiq_gswip.c vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; table 1070 drivers/net/dsa/lantiq_gswip.c vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; table 1239 drivers/net/dsa/lantiq_gswip.c mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; table 1325 drivers/net/dsa/lantiq_gswip.c mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; table 1364 drivers/net/dsa/lantiq_gswip.c mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; table 1541 drivers/net/dsa/lantiq_gswip.c static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, table 1550 drivers/net/dsa/lantiq_gswip.c table | GSWIP_BM_RAM_CTRL_BAS, table 1557 drivers/net/dsa/lantiq_gswip.c table, index); table 238 drivers/net/dsa/microchip/ksz8795.c static void ksz8795_r_table(struct ksz_device *dev, int table, u16 addr, table 243 drivers/net/dsa/microchip/ksz8795.c ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr; table 251 drivers/net/dsa/microchip/ksz8795.c static void ksz8795_w_table(struct ksz_device *dev, int table, u16 addr, table 256 drivers/net/dsa/microchip/ksz8795.c ctrl_addr = IND_ACC_TABLE(table) | addr; table 423 drivers/net/dsa/microchip/ksz8795.c dev->vlan_cache[addr + i].table[0] = (u16)data; table 454 drivers/net/dsa/microchip/ksz8795.c dev->vlan_cache[vid].table[0] = vlan; table 893 drivers/net/dsa/microchip/ksz8795_reg.h #define IND_ACC_TABLE(table) ((table) << 8) table 152 drivers/net/dsa/microchip/ksz9477.c dev->vlan_cache[vid].table[0] = vlan_table[0]; table 
153 drivers/net/dsa/microchip/ksz9477.c dev->vlan_cache[vid].table[1] = vlan_table[1]; table 154 drivers/net/dsa/microchip/ksz9477.c dev->vlan_cache[vid].table[2] = vlan_table[2]; table 162 drivers/net/dsa/microchip/ksz9477.c static void ksz9477_read_table(struct ksz_device *dev, u32 *table) table 164 drivers/net/dsa/microchip/ksz9477.c ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]); table 165 drivers/net/dsa/microchip/ksz9477.c ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]); table 166 drivers/net/dsa/microchip/ksz9477.c ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]); table 167 drivers/net/dsa/microchip/ksz9477.c ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]); table 170 drivers/net/dsa/microchip/ksz9477.c static void ksz9477_write_table(struct ksz_device *dev, u32 *table) table 172 drivers/net/dsa/microchip/ksz9477.c ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]); table 173 drivers/net/dsa/microchip/ksz9477.c ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]); table 174 drivers/net/dsa/microchip/ksz9477.c ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]); table 175 drivers/net/dsa/microchip/ksz9477.c ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]); table 18 drivers/net/dsa/microchip/ksz_common.h u32 table[3]; table 1302 drivers/net/dsa/mv88e6xxx/port.c int port, u16 table, u8 ptr, u16 data) table 1306 drivers/net/dsa/mv88e6xxx/port.c reg = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE | table | table 1317 drivers/net/dsa/mv88e6xxx/port.c u16 table; table 1320 drivers/net/dsa/mv88e6xxx/port.c table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP; table 1321 drivers/net/dsa/mv88e6xxx/port.c err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, table 1326 drivers/net/dsa/mv88e6xxx/port.c table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP; table 1327 drivers/net/dsa/mv88e6xxx/port.c err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i); table 1331 drivers/net/dsa/mv88e6xxx/port.c table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP; table 1332 drivers/net/dsa/mv88e6xxx/port.c err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i); table 1336 drivers/net/dsa/mv88e6xxx/port.c table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP; table 1337 drivers/net/dsa/mv88e6xxx/port.c err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i); table 103 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 106 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; table 109 drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 110 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 111 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 114 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_NUM_PORTS, table 115 drivers/net/dsa/sja1105/sja1105_main.c table->ops->unpacked_entry_size, GFP_KERNEL); table 116 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries) table 119 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = SJA1105_NUM_PORTS; table 121 drivers/net/dsa/sja1105/sja1105_main.c mac = table->entries; table 143 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 146 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS]; table 149 drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 150 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 151 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 154 drivers/net/dsa/sja1105/sja1105_main.c 
table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT, table 155 drivers/net/dsa/sja1105/sja1105_main.c table->ops->unpacked_entry_size, GFP_KERNEL); table 156 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries) table 160 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT; table 162 drivers/net/dsa/sja1105/sja1105_main.c mii = table->entries; table 190 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 192 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; table 197 drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 198 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 199 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 206 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 241 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; table 243 drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 244 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 245 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 248 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT, table 249 drivers/net/dsa/sja1105/sja1105_main.c table->ops->unpacked_entry_size, GFP_KERNEL); table 250 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries) table 253 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT; table 256 drivers/net/dsa/sja1105/sja1105_main.c ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] = table 264 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 275 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; table 281 drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 282 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 283 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 286 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(1, table->ops->unpacked_entry_size, table 288 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries) table 291 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 1; table 302 drivers/net/dsa/sja1105/sja1105_main.c ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; table 309 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 312 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; table 314 drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 315 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 316 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 319 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT, table 320 drivers/net/dsa/sja1105/sja1105_main.c table->ops->unpacked_entry_size, GFP_KERNEL); table 321 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries) table 324 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT; table 326 drivers/net/dsa/sja1105/sja1105_main.c l2fwd = table->entries; table 359 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 361 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; table 363 
drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 364 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 365 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 368 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT, table 369 drivers/net/dsa/sja1105/sja1105_main.c table->ops->unpacked_entry_size, GFP_KERNEL); table 370 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries) table 373 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT; table 376 drivers/net/dsa/sja1105/sja1105_main.c ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] = table 428 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 438 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; table 440 drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 441 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 442 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 445 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT, table 446 drivers/net/dsa/sja1105/sja1105_main.c table->ops->unpacked_entry_size, GFP_KERNEL); table 447 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries) table 450 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT; table 453 drivers/net/dsa/sja1105/sja1105_main.c ((struct sja1105_general_params_entry *)table->entries)[0] = table 475 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 478 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_L2_POLICING]; table 481 drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 482 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 483 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 486 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT, table 487 drivers/net/dsa/sja1105/sja1105_main.c table->ops->unpacked_entry_size, GFP_KERNEL); table 488 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries) table 491 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = SJA1105_MAX_L2_POLICING_COUNT; table 493 drivers/net/dsa/sja1105/sja1105_main.c policing = table->entries; table 514 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 516 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; table 519 drivers/net/dsa/sja1105/sja1105_main.c if (table->entry_count) { table 520 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries); table 521 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = 0; table 528 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT, table 529 drivers/net/dsa/sja1105/sja1105_main.c table->ops->unpacked_entry_size, GFP_KERNEL); table 530 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries) table 533 drivers/net/dsa/sja1105/sja1105_main.c table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT; table 535 drivers/net/dsa/sja1105/sja1105_main.c avb = table->entries; table 882 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 885 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; table 886 drivers/net/dsa/sja1105/sja1105_main.c l2_lookup = 
table->entries; table 888 drivers/net/dsa/sja1105/sja1105_main.c for (i = 0; i < table->entry_count; i++) table 908 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 911 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; table 920 drivers/net/dsa/sja1105/sja1105_main.c rc = sja1105_table_resize(table, table->entry_count + 1); table 924 drivers/net/dsa/sja1105/sja1105_main.c match = table->entry_count - 1; table 928 drivers/net/dsa/sja1105/sja1105_main.c l2_lookup = table->entries; table 944 drivers/net/dsa/sja1105/sja1105_main.c l2_lookup[match] = l2_lookup[table->entry_count - 1]; table 945 drivers/net/dsa/sja1105/sja1105_main.c return sja1105_table_resize(table, table->entry_count - 1); table 1462 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 1466 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; table 1473 drivers/net/dsa/sja1105/sja1105_main.c rc = sja1105_table_resize(table, table->entry_count + 1); table 1476 drivers/net/dsa/sja1105/sja1105_main.c match = table->entry_count - 1; table 1479 drivers/net/dsa/sja1105/sja1105_main.c vlan = table->entries; table 1513 drivers/net/dsa/sja1105/sja1105_main.c return sja1105_table_delete_entry(table, match); table 1557 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 1571 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; table 1572 drivers/net/dsa/sja1105/sja1105_main.c general_params = table->entries; table 1601 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; table 1602 drivers/net/dsa/sja1105/sja1105_main.c l2_lookup_params = table->entries; table 1888 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 1891 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; table 1892 drivers/net/dsa/sja1105/sja1105_main.c l2_lookup_params = table->entries; table 1911 drivers/net/dsa/sja1105/sja1105_main.c struct sja1105_table *table; table 1914 drivers/net/dsa/sja1105/sja1105_main.c table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; table 1915 drivers/net/dsa/sja1105/sja1105_main.c general_params = table->entries; table 634 drivers/net/dsa/sja1105/sja1105_static_config.c const struct sja1105_table *table; table 637 drivers/net/dsa/sja1105/sja1105_static_config.c table = &config->tables[i]; table 638 drivers/net/dsa/sja1105/sja1105_static_config.c if (!table->entry_count) table 642 drivers/net/dsa/sja1105/sja1105_static_config.c header.len = table->entry_count * table 643 drivers/net/dsa/sja1105/sja1105_static_config.c table->ops->packed_entry_size / 4; table 647 drivers/net/dsa/sja1105/sja1105_static_config.c for (j = 0; j < table->entry_count; j++) { table 648 drivers/net/dsa/sja1105/sja1105_static_config.c u8 *entry_ptr = table->entries; table 650 drivers/net/dsa/sja1105/sja1105_static_config.c entry_ptr += j * table->ops->unpacked_entry_size; table 651 drivers/net/dsa/sja1105/sja1105_static_config.c memset(p, 0, table->ops->packed_entry_size); table 652 drivers/net/dsa/sja1105/sja1105_static_config.c table->ops->packing(p, entry_ptr, PACK); table 653 drivers/net/dsa/sja1105/sja1105_static_config.c p += table->ops->packed_entry_size; table 683 drivers/net/dsa/sja1105/sja1105_static_config.c const struct sja1105_table *table; table 685 drivers/net/dsa/sja1105/sja1105_static_config.c table = 
&config->tables[i]; table 686 drivers/net/dsa/sja1105/sja1105_static_config.c if (table->entry_count) table 689 drivers/net/dsa/sja1105/sja1105_static_config.c sum += table->ops->packed_entry_size * table->entry_count; table 1199 drivers/net/dsa/sja1105/sja1105_static_config.c int sja1105_table_delete_entry(struct sja1105_table *table, int i) table 1201 drivers/net/dsa/sja1105/sja1105_static_config.c size_t entry_size = table->ops->unpacked_entry_size; table 1202 drivers/net/dsa/sja1105/sja1105_static_config.c u8 *entries = table->entries; table 1204 drivers/net/dsa/sja1105/sja1105_static_config.c if (i > table->entry_count) table 1208 drivers/net/dsa/sja1105/sja1105_static_config.c (table->entry_count - i) * entry_size); table 1210 drivers/net/dsa/sja1105/sja1105_static_config.c table->entry_count--; table 1216 drivers/net/dsa/sja1105/sja1105_static_config.c int sja1105_table_resize(struct sja1105_table *table, size_t new_count) table 1218 drivers/net/dsa/sja1105/sja1105_static_config.c size_t entry_size = table->ops->unpacked_entry_size; table 1219 drivers/net/dsa/sja1105/sja1105_static_config.c void *new_entries, *old_entries = table->entries; table 1221 drivers/net/dsa/sja1105/sja1105_static_config.c if (new_count > table->ops->max_entry_count) table 1228 drivers/net/dsa/sja1105/sja1105_static_config.c memcpy(new_entries, old_entries, min(new_count, table->entry_count) * table 1231 drivers/net/dsa/sja1105/sja1105_static_config.c table->entries = new_entries; table 1232 drivers/net/dsa/sja1105/sja1105_static_config.c table->entry_count = new_count; table 326 drivers/net/dsa/sja1105/sja1105_static_config.h int sja1105_table_delete_entry(struct sja1105_table *table, int i); table 327 drivers/net/dsa/sja1105/sja1105_static_config.h int sja1105_table_resize(struct sja1105_table *table, size_t new_count); table 94 drivers/net/dsa/sja1105/sja1105_tas.c struct sja1105_table *table; table 105 drivers/net/dsa/sja1105/sja1105_tas.c table = &priv->static_config.tables[BLK_IDX_SCHEDULE]; table 106 drivers/net/dsa/sja1105/sja1105_tas.c if (table->entry_count) { table 107 drivers/net/dsa/sja1105/sja1105_tas.c kfree(table->entries); table 108 drivers/net/dsa/sja1105/sja1105_tas.c table->entry_count = 0; table 112 drivers/net/dsa/sja1105/sja1105_tas.c table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS]; table 113 drivers/net/dsa/sja1105/sja1105_tas.c if (table->entry_count) { table 114 drivers/net/dsa/sja1105/sja1105_tas.c kfree(table->entries); table 115 drivers/net/dsa/sja1105/sja1105_tas.c table->entry_count = 0; table 119 drivers/net/dsa/sja1105/sja1105_tas.c table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS]; table 120 drivers/net/dsa/sja1105/sja1105_tas.c if (table->entry_count) { table 121 drivers/net/dsa/sja1105/sja1105_tas.c kfree(table->entries); table 122 drivers/net/dsa/sja1105/sja1105_tas.c table->entry_count = 0; table 126 drivers/net/dsa/sja1105/sja1105_tas.c table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS]; table 127 drivers/net/dsa/sja1105/sja1105_tas.c if (table->entry_count) { table 128 drivers/net/dsa/sja1105/sja1105_tas.c kfree(table->entries); table 129 drivers/net/dsa/sja1105/sja1105_tas.c table->entry_count = 0; table 147 drivers/net/dsa/sja1105/sja1105_tas.c table = &priv->static_config.tables[BLK_IDX_SCHEDULE]; table 148 drivers/net/dsa/sja1105/sja1105_tas.c table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size, table 150 drivers/net/dsa/sja1105/sja1105_tas.c if (!table->entries) table 152 
drivers/net/dsa/sja1105/sja1105_tas.c table->entry_count = num_entries; table 153 drivers/net/dsa/sja1105/sja1105_tas.c schedule = table->entries; table 156 drivers/net/dsa/sja1105/sja1105_tas.c table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS]; table 157 drivers/net/dsa/sja1105/sja1105_tas.c table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT, table 158 drivers/net/dsa/sja1105/sja1105_tas.c table->ops->unpacked_entry_size, GFP_KERNEL); table 159 drivers/net/dsa/sja1105/sja1105_tas.c if (!table->entries) table 165 drivers/net/dsa/sja1105/sja1105_tas.c table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT; table 166 drivers/net/dsa/sja1105/sja1105_tas.c schedule_entry_points_params = table->entries; table 169 drivers/net/dsa/sja1105/sja1105_tas.c table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS]; table 170 drivers/net/dsa/sja1105/sja1105_tas.c table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT, table 171 drivers/net/dsa/sja1105/sja1105_tas.c table->ops->unpacked_entry_size, GFP_KERNEL); table 172 drivers/net/dsa/sja1105/sja1105_tas.c if (!table->entries) table 174 drivers/net/dsa/sja1105/sja1105_tas.c table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT; table 175 drivers/net/dsa/sja1105/sja1105_tas.c schedule_params = table->entries; table 178 drivers/net/dsa/sja1105/sja1105_tas.c table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS]; table 179 drivers/net/dsa/sja1105/sja1105_tas.c table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size, table 181 drivers/net/dsa/sja1105/sja1105_tas.c if (!table->entries) table 183 drivers/net/dsa/sja1105/sja1105_tas.c table->entry_count = num_cycles; table 184 drivers/net/dsa/sja1105/sja1105_tas.c schedule_entry_points = table->entries; table 399 drivers/net/ethernet/amd/xgbe/xgbe-dev.c const u32 *table) table 404 drivers/net/ethernet/amd/xgbe/xgbe-dev.c XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); table 474 drivers/net/ethernet/atheros/ag71xx.c const u32 *table; table 482 drivers/net/ethernet/atheros/ag71xx.c table = ar933x_mdio_div_table; table 485 drivers/net/ethernet/atheros/ag71xx.c table = ar7240_mdio_div_table; table 488 drivers/net/ethernet/atheros/ag71xx.c table = ar71xx_mdio_div_table; table 495 drivers/net/ethernet/atheros/ag71xx.c t = ref_clock / table[i]; table 918 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c struct bnx2x_admin_priority_app_table *table = table 920 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c if ((ETH_TYPE_FCOE == table[i].app_id) && table 921 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c (TRAFFIC_TYPE_ETH == table[i].traffic_type)) table 923 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c else if ((TCP_PORT_ISCSI == table[i].app_id) && table 924 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c (TRAFFIC_TYPE_PORT == table[i].traffic_type)) table 930 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c table[i].app_id; table 933 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c (u8)(1 << table[i].priority); table 939 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c (TRAFFIC_TYPE_ETH == table[i].traffic_type) ? 
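The sja1105_main.c, sja1105_static_config.c and sja1105_tas.c entries above repeat one static-config idiom: if a table already holds entries, free them and zero entry_count, then kcalloc a fixed number of unpacked entries and record the new count. The snippet below is only a minimal userspace sketch of that reset-and-reallocate pattern, not the kernel code; the names demo_table and demo_table_reset_and_alloc are invented for illustration, and entry_size stands in for table->ops->unpacked_entry_size.

    /*
     * Illustrative analog of the sja1105 "reset and reallocate" table setup:
     * drop any previous entries, allocate count zeroed entries, record count.
     * All identifiers here are made up for the sketch.
     */
    #include <stdlib.h>

    struct demo_table {
            void  *entries;        /* array of entry_count entries */
            size_t entry_count;
            size_t entry_size;     /* stands in for ops->unpacked_entry_size */
    };

    static int demo_table_reset_and_alloc(struct demo_table *table, size_t count)
    {
            if (table->entry_count) {       /* discard any old configuration */
                    free(table->entries);
                    table->entry_count = 0;
            }

            table->entries = calloc(count, table->entry_size);  /* kcalloc analog */
            if (!table->entries)
                    return -1;              /* -ENOMEM in the kernel code */

            table->entry_count = count;
            return 0;
    }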
table 2509 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c struct dcb_app *table) table 2521 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent); table 2522 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent); table 2523 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c table[j++].protocol = ent->app_id; table 663 drivers/net/ethernet/broadcom/cnic.c id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL); table 664 drivers/net/ethernet/broadcom/cnic.c if (!id_tbl->table) table 672 drivers/net/ethernet/broadcom/cnic.c kfree(id_tbl->table); table 673 drivers/net/ethernet/broadcom/cnic.c id_tbl->table = NULL; table 685 drivers/net/ethernet/broadcom/cnic.c if (!test_bit(id, id_tbl->table)) { table 686 drivers/net/ethernet/broadcom/cnic.c set_bit(id, id_tbl->table); table 699 drivers/net/ethernet/broadcom/cnic.c id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); table 703 drivers/net/ethernet/broadcom/cnic.c id = find_first_zero_bit(id_tbl->table, id_tbl->next); table 710 drivers/net/ethernet/broadcom/cnic.c set_bit(id, id_tbl->table); table 729 drivers/net/ethernet/broadcom/cnic.c clear_bit(id, id_tbl->table); table 145 drivers/net/ethernet/broadcom/cnic.h unsigned long *table; table 529 drivers/net/ethernet/brocade/bna/bfi_enet.h u8 table[BFI_ENET_RSS_RIT_MAX]; table 305 drivers/net/ethernet/brocade/bna/bna_tx_rx.c memcpy(&req->table[0], rxf->rit, rxf->rit_size); table 1144 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table) table 1170 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1); table 1171 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c table[i].protocol = table 1173 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c table[i].priority = table 203 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c if (!t->table[uhtid - 1].link_handle) table 207 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c link_start = t->table[uhtid - 1].match_field; table 222 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c if (t->table[link_uhtid - 1].link_handle) { table 269 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c link = &t->table[link_uhtid - 1]; table 287 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) { table 289 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs)); table 333 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) table 334 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c set_bit(filter_id, t->table[uhtid - 1].tid_map); table 374 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c link = &t->table[uhtid - 1]; table 394 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c link = &t->table[i]; table 431 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c struct cxgb4_link *link = &t->table[i]; table 447 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL); table 454 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c struct cxgb4_link *link = &t->table[i]; table 469 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c struct cxgb4_link *link = &t->table[i]; table 292 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h struct cxgb4_link table[0]; /* Jump table */ table 245 drivers/net/ethernet/freescale/enetc/enetc.h int 
enetc_get_rss_table(struct enetc_si *si, u32 *table, int count); table 246 drivers/net/ethernet/freescale/enetc/enetc.h int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count); table 154 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, table 177 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c tmp_align[i] = (u8)(table[i]); table 193 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c table[i] = tmp_align[i]; table 201 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count) table 203 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c return enetc_cmd_rss_table(si, table, count, true); table 207 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count) table 209 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c return enetc_cmd_rss_table(si, (u32 *)table, count, false); table 975 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c u32 table[4]; table 990 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c table[j] = n; table 993 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c reta = table[0] | table 994 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c (table[1] << 8) | table 995 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c (table[2] << 16) | table 996 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c (table[3] << 24); table 831 drivers/net/ethernet/intel/ice/ice_lib.c u32 table = 0; table 848 drivers/net/ethernet/intel/ice/ice_lib.c table |= ICE_UP_TABLE_TRANSLATE(0, 0); table 849 drivers/net/ethernet/intel/ice/ice_lib.c table |= ICE_UP_TABLE_TRANSLATE(1, 1); table 850 drivers/net/ethernet/intel/ice/ice_lib.c table |= ICE_UP_TABLE_TRANSLATE(2, 2); table 851 drivers/net/ethernet/intel/ice/ice_lib.c table |= ICE_UP_TABLE_TRANSLATE(3, 3); table 852 drivers/net/ethernet/intel/ice/ice_lib.c table |= ICE_UP_TABLE_TRANSLATE(4, 4); table 853 drivers/net/ethernet/intel/ice/ice_lib.c table |= ICE_UP_TABLE_TRANSLATE(5, 5); table 854 drivers/net/ethernet/intel/ice/ice_lib.c table |= ICE_UP_TABLE_TRANSLATE(6, 6); table 855 drivers/net/ethernet/intel/ice/ice_lib.c table |= ICE_UP_TABLE_TRANSLATE(7, 7); table 856 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.ingress_table = cpu_to_le32(table); table 857 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.egress_table = cpu_to_le32(table); table 859 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.outer_up_table = cpu_to_le32(table); table 1875 drivers/net/ethernet/marvell/mv643xx_eth.c u32 *table; table 1879 drivers/net/ethernet/marvell/mv643xx_eth.c table = mc_spec; table 1882 drivers/net/ethernet/marvell/mv643xx_eth.c table = mc_other; table 1886 drivers/net/ethernet/marvell/mv643xx_eth.c table[entry >> 2] |= 1 << (8 * (entry & 3)); table 1464 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c struct mvpp2_rss_table *table, table 1476 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c mvpp22_rxfh_indir(port, table->indir[i])); table 1702 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c struct mvpp2_rss_table *table; table 1713 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c table = mvpp22_rss_table_get(port->priv, context); table 1714 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c if (!table) table 1723 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs); table 1725 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0)); table 50 
drivers/net/ethernet/mellanox/mlx4/alloc.c obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); table 54 drivers/net/ethernet/mellanox/mlx4/alloc.c obj = find_first_zero_bit(bitmap->table, bitmap->max); table 58 drivers/net/ethernet/mellanox/mlx4/alloc.c set_bit(obj, bitmap->table); table 119 drivers/net/ethernet/mellanox/mlx4/alloc.c obj = find_aligned_range(bitmap->table, bitmap->last, table 124 drivers/net/ethernet/mellanox/mlx4/alloc.c obj = find_aligned_range(bitmap->table, 0, bitmap->max, table 129 drivers/net/ethernet/mellanox/mlx4/alloc.c bitmap_set(bitmap->table, obj, cnt); table 168 drivers/net/ethernet/mellanox/mlx4/alloc.c bitmap_clear(bitmap->table, obj, cnt); table 188 drivers/net/ethernet/mellanox/mlx4/alloc.c bitmap->table = bitmap_zalloc(bitmap->max, GFP_KERNEL); table 189 drivers/net/ethernet/mellanox/mlx4/alloc.c if (!bitmap->table) table 192 drivers/net/ethernet/mellanox/mlx4/alloc.c bitmap_set(bitmap->table, 0, reserved_bot); table 199 drivers/net/ethernet/mellanox/mlx4/alloc.c bitmap_free(bitmap->table); table 865 drivers/net/ethernet/mellanox/mlx4/cmd.c static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table, table 873 drivers/net/ethernet/mellanox/mlx4/cmd.c err = query_pkey_block(dev, port, i, table + i, inbox, outbox); table 901 drivers/net/ethernet/mellanox/mlx4/cmd.c u16 *table; table 927 drivers/net/ethernet/mellanox/mlx4/cmd.c table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1, table 928 drivers/net/ethernet/mellanox/mlx4/cmd.c sizeof(*table) * 32, GFP_KERNEL); table 930 drivers/net/ethernet/mellanox/mlx4/cmd.c if (!table) table 935 drivers/net/ethernet/mellanox/mlx4/cmd.c err = get_full_pkey_table(dev, port, table, inbox, outbox); table 939 drivers/net/ethernet/mellanox/mlx4/cmd.c outtab[vidx % 32] = cpu_to_be16(table[pidx]); table 942 drivers/net/ethernet/mellanox/mlx4/cmd.c kfree(table); table 227 drivers/net/ethernet/mellanox/mlx4/cq.c err = mlx4_table_get(dev, &cq_table->table, *cqn); table 237 drivers/net/ethernet/mellanox/mlx4/cq.c mlx4_table_put(dev, &cq_table->table, *cqn); table 270 drivers/net/ethernet/mellanox/mlx4/cq.c mlx4_table_put(dev, &cq_table->table, cqn); table 258 drivers/net/ethernet/mellanox/mlx4/icm.c int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) table 260 drivers/net/ethernet/mellanox/mlx4/icm.c u32 i = (obj & (table->num_obj - 1)) / table 261 drivers/net/ethernet/mellanox/mlx4/icm.c (MLX4_TABLE_CHUNK_SIZE / table->obj_size); table 264 drivers/net/ethernet/mellanox/mlx4/icm.c mutex_lock(&table->mutex); table 266 drivers/net/ethernet/mellanox/mlx4/icm.c if (table->icm[i]) { table 267 drivers/net/ethernet/mellanox/mlx4/icm.c ++table->icm[i]->refcount; table 271 drivers/net/ethernet/mellanox/mlx4/icm.c table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, table 272 drivers/net/ethernet/mellanox/mlx4/icm.c (table->lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | table 273 drivers/net/ethernet/mellanox/mlx4/icm.c __GFP_NOWARN, table->coherent); table 274 drivers/net/ethernet/mellanox/mlx4/icm.c if (!table->icm[i]) { table 279 drivers/net/ethernet/mellanox/mlx4/icm.c if (mlx4_MAP_ICM(dev, table->icm[i], table->virt + table 281 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_free_icm(dev, table->icm[i], table->coherent); table 282 drivers/net/ethernet/mellanox/mlx4/icm.c table->icm[i] = NULL; table 287 drivers/net/ethernet/mellanox/mlx4/icm.c ++table->icm[i]->refcount; table 290 drivers/net/ethernet/mellanox/mlx4/icm.c mutex_unlock(&table->mutex); table 294 drivers/net/ethernet/mellanox/mlx4/icm.c void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) table 299 drivers/net/ethernet/mellanox/mlx4/icm.c i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); table 301 drivers/net/ethernet/mellanox/mlx4/icm.c mutex_lock(&table->mutex); table 303 drivers/net/ethernet/mellanox/mlx4/icm.c if (--table->icm[i]->refcount == 0) { table 305 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_UNMAP_ICM(dev, table->virt + offset, table 307 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_free_icm(dev, table->icm[i], table->coherent); table 308 drivers/net/ethernet/mellanox/mlx4/icm.c table->icm[i] = NULL; table 311 drivers/net/ethernet/mellanox/mlx4/icm.c mutex_unlock(&table->mutex); table 314 drivers/net/ethernet/mellanox/mlx4/icm.c void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, table 323 drivers/net/ethernet/mellanox/mlx4/icm.c if (!table->lowmem) table 326 drivers/net/ethernet/mellanox/mlx4/icm.c mutex_lock(&table->mutex); table 328 drivers/net/ethernet/mellanox/mlx4/icm.c idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size; table 329 drivers/net/ethernet/mellanox/mlx4/icm.c icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE]; table 340 drivers/net/ethernet/mellanox/mlx4/icm.c if (table->coherent) { table 378 drivers/net/ethernet/mellanox/mlx4/icm.c mutex_unlock(&table->mutex); table 382 drivers/net/ethernet/mellanox/mlx4/icm.c int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, table 385 drivers/net/ethernet/mellanox/mlx4/icm.c int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size; table 390 drivers/net/ethernet/mellanox/mlx4/icm.c err = mlx4_table_get(dev, table, i); table 400 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_table_put(dev, table, i); table 406 drivers/net/ethernet/mellanox/mlx4/icm.c void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, table 411 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size) table 412 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_table_put(dev, table, i); table 415 drivers/net/ethernet/mellanox/mlx4/icm.c int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, table 430 drivers/net/ethernet/mellanox/mlx4/icm.c table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); table 431 drivers/net/ethernet/mellanox/mlx4/icm.c if (!table->icm) table 433 drivers/net/ethernet/mellanox/mlx4/icm.c table->virt = virt; table 434 drivers/net/ethernet/mellanox/mlx4/icm.c table->num_icm = num_icm; table 435 drivers/net/ethernet/mellanox/mlx4/icm.c table->num_obj = nobj; table 436 drivers/net/ethernet/mellanox/mlx4/icm.c table->obj_size = obj_size; table 437 drivers/net/ethernet/mellanox/mlx4/icm.c table->lowmem = use_lowmem; table 438 drivers/net/ethernet/mellanox/mlx4/icm.c table->coherent = use_coherent; table 439 
drivers/net/ethernet/mellanox/mlx4/icm.c mutex_init(&table->mutex); table 448 drivers/net/ethernet/mellanox/mlx4/icm.c table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, table 451 drivers/net/ethernet/mellanox/mlx4/icm.c if (!table->icm[i]) table 453 drivers/net/ethernet/mellanox/mlx4/icm.c if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) { table 454 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_free_icm(dev, table->icm[i], use_coherent); table 455 drivers/net/ethernet/mellanox/mlx4/icm.c table->icm[i] = NULL; table 463 drivers/net/ethernet/mellanox/mlx4/icm.c ++table->icm[i]->refcount; table 470 drivers/net/ethernet/mellanox/mlx4/icm.c if (table->icm[i]) { table 473 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_free_icm(dev, table->icm[i], use_coherent); table 476 drivers/net/ethernet/mellanox/mlx4/icm.c kvfree(table->icm); table 481 drivers/net/ethernet/mellanox/mlx4/icm.c void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table) table 485 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = 0; i < table->num_icm; ++i) table 486 drivers/net/ethernet/mellanox/mlx4/icm.c if (table->icm[i]) { table 487 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, table 489 drivers/net/ethernet/mellanox/mlx4/icm.c mlx4_free_icm(dev, table->icm[i], table->coherent); table 492 drivers/net/ethernet/mellanox/mlx4/icm.c kvfree(table->icm); table 84 drivers/net/ethernet/mellanox/mlx4/icm.h int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); table 85 drivers/net/ethernet/mellanox/mlx4/icm.h void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); table 86 drivers/net/ethernet/mellanox/mlx4/icm.h int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, table 88 drivers/net/ethernet/mellanox/mlx4/icm.h void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, table 90 drivers/net/ethernet/mellanox/mlx4/icm.h int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, table 93 drivers/net/ethernet/mellanox/mlx4/icm.h void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); table 94 drivers/net/ethernet/mellanox/mlx4/icm.h void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle); table 1680 drivers/net/ethernet/mellanox/mlx4/main.c err = mlx4_init_icm_table(dev, &priv->eq_table.table, table 1763 drivers/net/ethernet/mellanox/mlx4/main.c err = mlx4_init_icm_table(dev, &priv->cq_table.table, table 1773 drivers/net/ethernet/mellanox/mlx4/main.c err = mlx4_init_icm_table(dev, &priv->srq_table.table, table 1790 drivers/net/ethernet/mellanox/mlx4/main.c err = mlx4_init_icm_table(dev, &priv->mcg_table.table, table 1804 drivers/net/ethernet/mellanox/mlx4/main.c mlx4_cleanup_icm_table(dev, &priv->srq_table.table); table 1807 drivers/net/ethernet/mellanox/mlx4/main.c mlx4_cleanup_icm_table(dev, &priv->cq_table.table); table 1828 drivers/net/ethernet/mellanox/mlx4/main.c mlx4_cleanup_icm_table(dev, &priv->eq_table.table); table 1849 drivers/net/ethernet/mellanox/mlx4/main.c mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); table 1850 drivers/net/ethernet/mellanox/mlx4/main.c mlx4_cleanup_icm_table(dev, &priv->srq_table.table); table 1851 drivers/net/ethernet/mellanox/mlx4/main.c mlx4_cleanup_icm_table(dev, &priv->cq_table.table); table 1858 drivers/net/ethernet/mellanox/mlx4/main.c mlx4_cleanup_icm_table(dev, &priv->eq_table.table); table 248 
drivers/net/ethernet/mellanox/mlx4/mlx4.h unsigned long *table; table 683 drivers/net/ethernet/mellanox/mlx4/mlx4.h struct mlx4_icm_table table; table 694 drivers/net/ethernet/mellanox/mlx4/mlx4.h struct mlx4_icm_table table; table 704 drivers/net/ethernet/mellanox/mlx4/mlx4.h struct mlx4_icm_table table; table 732 drivers/net/ethernet/mellanox/mlx4/mlx4.h struct mlx4_icm_table table; table 1246 drivers/net/ethernet/mellanox/mlx4/mlx4.h void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); table 1247 drivers/net/ethernet/mellanox/mlx4/mlx4.h void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); table 1249 drivers/net/ethernet/mellanox/mlx4/mlx4.h struct mlx4_roce_gid_table *table); table 62 drivers/net/ethernet/mellanox/mlx4/port.c void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) table 66 drivers/net/ethernet/mellanox/mlx4/port.c mutex_init(&table->mutex); table 68 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[i] = 0; table 69 drivers/net/ethernet/mellanox/mlx4/port.c table->refs[i] = 0; table 70 drivers/net/ethernet/mellanox/mlx4/port.c table->is_dup[i] = false; table 72 drivers/net/ethernet/mellanox/mlx4/port.c table->max = 1 << dev->caps.log_num_macs; table 73 drivers/net/ethernet/mellanox/mlx4/port.c table->total = 0; table 76 drivers/net/ethernet/mellanox/mlx4/port.c void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) table 80 drivers/net/ethernet/mellanox/mlx4/port.c mutex_init(&table->mutex); table 82 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[i] = 0; table 83 drivers/net/ethernet/mellanox/mlx4/port.c table->refs[i] = 0; table 84 drivers/net/ethernet/mellanox/mlx4/port.c table->is_dup[i] = false; table 86 drivers/net/ethernet/mellanox/mlx4/port.c table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR; table 87 drivers/net/ethernet/mellanox/mlx4/port.c table->total = 0; table 91 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_roce_gid_table *table) table 95 drivers/net/ethernet/mellanox/mlx4/port.c mutex_init(&table->mutex); table 97 drivers/net/ethernet/mellanox/mlx4/port.c memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE); table 101 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_mac_table *table, int index) table 105 drivers/net/ethernet/mellanox/mlx4/port.c if (index < 0 || index >= table->max || !table->entries[index]) { table 113 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_mac_table *table, u64 mac) table 118 drivers/net/ethernet/mellanox/mlx4/port.c if (table->refs[i] && table 120 drivers/net/ethernet/mellanox/mlx4/port.c (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) table 153 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_mac_table *table = &info->mac_table; table 157 drivers/net/ethernet/mellanox/mlx4/port.c if (!table->refs[i]) table 160 drivers/net/ethernet/mellanox/mlx4/port.c if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { table 185 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_mac_table *table = &info->mac_table; table 201 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 205 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); table 208 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 216 drivers/net/ethernet/mellanox/mlx4/port.c if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))) table 242 drivers/net/ethernet/mellanox/mlx4/port.c if 
(!table->refs[index_at_dup_port] || table 243 drivers/net/ethernet/mellanox/mlx4/port.c ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port])))) table 251 drivers/net/ethernet/mellanox/mlx4/port.c if (!table->refs[i]) { table 262 drivers/net/ethernet/mellanox/mlx4/port.c (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { table 265 drivers/net/ethernet/mellanox/mlx4/port.c ++table->refs[i]; table 292 drivers/net/ethernet/mellanox/mlx4/port.c if (table->total == table->max) { table 299 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); table 301 drivers/net/ethernet/mellanox/mlx4/port.c err = mlx4_set_port_mac_table(dev, port, table->entries); table 305 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[free] = 0; table 308 drivers/net/ethernet/mellanox/mlx4/port.c table->refs[free] = 1; table 309 drivers/net/ethernet/mellanox/mlx4/port.c table->is_dup[free] = false; table 310 drivers/net/ethernet/mellanox/mlx4/port.c ++table->total; table 329 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 333 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 336 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 382 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_mac_table *table; table 393 drivers/net/ethernet/mellanox/mlx4/port.c table = &info->mac_table; table 397 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 401 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); table 404 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 407 drivers/net/ethernet/mellanox/mlx4/port.c index = find_index(dev, table, mac); table 409 drivers/net/ethernet/mellanox/mlx4/port.c if (validate_index(dev, table, index)) table 412 drivers/net/ethernet/mellanox/mlx4/port.c if (--table->refs[index] || table->is_dup[index]) { table 415 drivers/net/ethernet/mellanox/mlx4/port.c if (!table->refs[index]) table 420 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[index] = 0; table 421 drivers/net/ethernet/mellanox/mlx4/port.c if (mlx4_set_port_mac_table(dev, port, table->entries)) table 423 drivers/net/ethernet/mellanox/mlx4/port.c --table->total; table 433 drivers/net/ethernet/mellanox/mlx4/port.c --table->total; table 438 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 442 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 445 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 477 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_mac_table *table = &info->mac_table; table 487 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 491 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); table 494 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 497 drivers/net/ethernet/mellanox/mlx4/port.c err = validate_index(dev, table, index); table 501 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); table 503 drivers/net/ethernet/mellanox/mlx4/port.c err = mlx4_set_port_mac_table(dev, port, table->entries); table 507 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[index] = 0; table 523 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 527 drivers/net/ethernet/mellanox/mlx4/port.c 
mutex_unlock(&table->mutex); table 530 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 560 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; table 564 drivers/net/ethernet/mellanox/mlx4/port.c if (table->refs[i] && table 566 drivers/net/ethernet/mellanox/mlx4/port.c be32_to_cpu(table->entries[i])))) { table 580 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; table 596 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 600 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); table 603 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 606 drivers/net/ethernet/mellanox/mlx4/port.c if (table->total == table->max) { table 617 drivers/net/ethernet/mellanox/mlx4/port.c if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))) table 642 drivers/net/ethernet/mellanox/mlx4/port.c if (!table->refs[index_at_dup_port] || table 651 drivers/net/ethernet/mellanox/mlx4/port.c if (!table->refs[i]) { table 660 drivers/net/ethernet/mellanox/mlx4/port.c if ((table->refs[i] || table->is_dup[i]) && table 662 drivers/net/ethernet/mellanox/mlx4/port.c be32_to_cpu(table->entries[i])))) { table 666 drivers/net/ethernet/mellanox/mlx4/port.c ++table->refs[i]; table 697 drivers/net/ethernet/mellanox/mlx4/port.c table->refs[free] = 1; table 698 drivers/net/ethernet/mellanox/mlx4/port.c table->is_dup[free] = false; table 699 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); table 701 drivers/net/ethernet/mellanox/mlx4/port.c err = mlx4_set_port_vlan_table(dev, port, table->entries); table 704 drivers/net/ethernet/mellanox/mlx4/port.c table->refs[free] = 0; table 705 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[free] = 0; table 708 drivers/net/ethernet/mellanox/mlx4/port.c ++table->total; table 728 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 732 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 735 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 764 drivers/net/ethernet/mellanox/mlx4/port.c struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; table 772 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 776 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); table 779 drivers/net/ethernet/mellanox/mlx4/port.c mutex_lock(&table->mutex); table 792 drivers/net/ethernet/mellanox/mlx4/port.c if (--table->refs[index] || table->is_dup[index]) { table 794 drivers/net/ethernet/mellanox/mlx4/port.c table->refs[index], index); table 795 drivers/net/ethernet/mellanox/mlx4/port.c if (!table->refs[index]) table 799 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[index] = 0; table 800 drivers/net/ethernet/mellanox/mlx4/port.c if (mlx4_set_port_vlan_table(dev, port, table->entries)) table 802 drivers/net/ethernet/mellanox/mlx4/port.c --table->total; table 815 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 819 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 822 drivers/net/ethernet/mellanox/mlx4/port.c mutex_unlock(&table->mutex); table 103 drivers/net/ethernet/mellanox/mlx4/srq.c err = mlx4_table_get(dev, &srq_table->table, *srqn); table 113 drivers/net/ethernet/mellanox/mlx4/srq.c 
mlx4_table_put(dev, &srq_table->table, *srqn); table 143 drivers/net/ethernet/mellanox/mlx4/srq.c mlx4_table_put(dev, &srq_table->table, srqn); table 161 drivers/net/ethernet/mellanox/mlx5/core/en/port.c const u32 *table; table 166 drivers/net/ethernet/mellanox/mlx5/core/en/port.c mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy); table 169 drivers/net/ethernet/mellanox/mlx5/core/en/port.c speed = table[i]; table 202 drivers/net/ethernet/mellanox/mlx5/core/en/port.c const u32 *table; table 213 drivers/net/ethernet/mellanox/mlx5/core/en/port.c mlx5e_port_get_speed_arr(mdev, &table, &max_size, false); table 216 drivers/net/ethernet/mellanox/mlx5/core/en/port.c max_speed = max(max_speed, table[i]); table 226 drivers/net/ethernet/mellanox/mlx5/core/en/port.c const u32 *table; table 230 drivers/net/ethernet/mellanox/mlx5/core/en/port.c mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy); table 232 drivers/net/ethernet/mellanox/mlx5/core/en/port.c if (table[i] == speed) table 72 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c #define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, table, ...) \ table 77 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c cfg = &ptys2##table##_ethtool_table[reg_]; \ table 610 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c struct ptys2ethtool_config *table; table 614 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); table 617 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c table[proto].supported, table 625 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c struct ptys2ethtool_config *table; table 629 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; table 635 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c table[proto].advertised, table 116 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_cq_table *table = &eq->cq_table; table 120 drivers/net/ethernet/mellanox/mlx5/core/eq.c cq = radix_tree_lookup(&table->tree, cqn); table 385 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_cq_table *table = &eq->cq_table; table 388 drivers/net/ethernet/mellanox/mlx5/core/eq.c spin_lock(&table->lock); table 389 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = radix_tree_insert(&table->tree, cq->cqn, cq); table 390 drivers/net/ethernet/mellanox/mlx5/core/eq.c spin_unlock(&table->lock); table 397 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_cq_table *table = &eq->cq_table; table 400 drivers/net/ethernet/mellanox/mlx5/core/eq.c spin_lock(&table->lock); table 401 drivers/net/ethernet/mellanox/mlx5/core/eq.c tmp = radix_tree_delete(&table->tree, cq->cqn); table 402 drivers/net/ethernet/mellanox/mlx5/core/eq.c spin_unlock(&table->lock); table 568 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_eq_table *table = dev->priv.eq_table; table 572 drivers/net/ethernet/mellanox/mlx5/core/eq.c MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR); table 573 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_eq_notifier_register(dev, &table->cq_err_nb); table 575 drivers/net/ethernet/mellanox/mlx5/core/eq.c table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int; table 582 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = create_async_eq(dev, &table->cmd_eq.core, &param); table 587 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); table 594 drivers/net/ethernet/mellanox/mlx5/core/eq.c table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
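The mlx5 eq.c lines indexed above (source lines 116 through 402) all touch the per-EQ CQ table: a radix tree keyed by CQN and guarded by a spinlock, with radix_tree_insert/radix_tree_delete on CQ attach/detach and radix_tree_lookup on the completion path. Below is a minimal sketch of that pattern, assuming ordinary kernel context; struct demo_cq_table and the demo_* helpers are illustrative names, not the driver's API.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/* Shape of the lookup table these eq.c lines operate on (illustrative only). */
struct demo_cq_table {
	spinlock_t lock;		/* protects tree */
	struct radix_tree_root tree;	/* cqn -> completion queue object */
};

static void demo_cq_table_init(struct demo_cq_table *table)
{
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

/* Attach: called when a CQ is created and bound to this EQ. */
static int demo_cq_add(struct demo_cq_table *table, u32 cqn, void *cq)
{
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cqn, cq);
	spin_unlock(&table->lock);
	return err;
}

/* Completion path: resolve the CQN reported by an EQE to a CQ object. */
static void *demo_cq_get(struct demo_cq_table *table, u32 cqn)
{
	void *cq;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	spin_unlock(&table->lock);
	return cq;
}

/* Detach: called on CQ destruction; returns the removed entry or NULL. */
static void *demo_cq_del(struct demo_cq_table *table, u32 cqn)
{
	void *cq;

	spin_lock(&table->lock);
	cq = radix_tree_delete(&table->tree, cqn);
	spin_unlock(&table->lock);
	return cq;
}

The same sketch also reflects why the insert/delete sides take the lock: the lookup runs from the EQ interrupt path, so the tree must never be observed mid-update.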
table 601 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = create_async_eq(dev, &table->async_eq.core, &param); table 606 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = mlx5_eq_enable(dev, &table->async_eq.core, table 607 drivers/net/ethernet/mellanox/mlx5/core/eq.c &table->async_eq.irq_nb); table 613 drivers/net/ethernet/mellanox/mlx5/core/eq.c table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int; table 620 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = create_async_eq(dev, &table->pages_eq.core, &param); table 625 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = mlx5_eq_enable(dev, &table->pages_eq.core, table 626 drivers/net/ethernet/mellanox/mlx5/core/eq.c &table->pages_eq.irq_nb); table 635 drivers/net/ethernet/mellanox/mlx5/core/eq.c destroy_async_eq(dev, &table->pages_eq.core); table 637 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb); table 639 drivers/net/ethernet/mellanox/mlx5/core/eq.c destroy_async_eq(dev, &table->async_eq.core); table 642 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); table 644 drivers/net/ethernet/mellanox/mlx5/core/eq.c destroy_async_eq(dev, &table->cmd_eq.core); table 646 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); table 652 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_eq_table *table = dev->priv.eq_table; table 655 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb); table 656 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = destroy_async_eq(dev, &table->pages_eq.core); table 661 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb); table 662 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = destroy_async_eq(dev, &table->async_eq.core); table 669 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); table 670 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = destroy_async_eq(dev, &table->cmd_eq.core); table 675 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); table 766 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_eq_table *table = dev->priv.eq_table; table 769 drivers/net/ethernet/mellanox/mlx5/core/eq.c list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { table 782 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_eq_table *table = dev->priv.eq_table; table 789 drivers/net/ethernet/mellanox/mlx5/core/eq.c INIT_LIST_HEAD(&table->comp_eqs_list); table 790 drivers/net/ethernet/mellanox/mlx5/core/eq.c ncomp_eqs = table->num_comp_eqs; table 827 drivers/net/ethernet/mellanox/mlx5/core/eq.c list_add_tail(&eq->list, &table->comp_eqs_list); table 840 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_eq_table *table = dev->priv.eq_table; table 845 drivers/net/ethernet/mellanox/mlx5/core/eq.c list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { table 883 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_eq_table *table = dev->priv.eq_table; table 886 drivers/net/ethernet/mellanox/mlx5/core/eq.c list_for_each_entry(eq, &table->comp_eqs_list, list) { table 897 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_eq_table *table = dev->priv.eq_table; table 899 drivers/net/ethernet/mellanox/mlx5/core/eq.c mutex_lock(&table->lock); /* sync with create/destroy_async_eq
*/ table 901 drivers/net/ethernet/mellanox/mlx5/core/eq.c mutex_unlock(&table->lock); table 172 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table); table 173 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h int mlx5_irq_get_num_comp(struct mlx5_irq_table *table); table 46 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c int mlx5_irq_get_num_comp(struct mlx5_irq_table *table) table 48 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c return table->nvec - MLX5_IRQ_VEC_COMP_BASE; table 253 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c struct mlx5_irq_table *table = dev->priv.irq_table; table 256 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c for (i = 0; i < table->nvec; i++) table 264 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c struct mlx5_irq_table *table = priv->irq_table; table 277 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL); table 278 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c if (!table->irq) table 288 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c table->nvec = nvec; table 313 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c kfree(table->irq); table 319 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c struct mlx5_irq_table *table = dev->priv.irq_table; table 328 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c for (i = 0; i < table->nvec; i++) table 332 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c kfree(table->irq); table 47 drivers/net/ethernet/mellanox/mlx5/core/qp.c mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) table 52 drivers/net/ethernet/mellanox/mlx5/core/qp.c spin_lock_irqsave(&table->lock, flags); table 54 drivers/net/ethernet/mellanox/mlx5/core/qp.c common = radix_tree_lookup(&table->tree, rsn); table 58 drivers/net/ethernet/mellanox/mlx5/core/qp.c spin_unlock_irqrestore(&table->lock, flags); table 126 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_qp_table *table; table 157 drivers/net/ethernet/mellanox/mlx5/core/qp.c table = container_of(nb, struct mlx5_qp_table, nb); table 158 drivers/net/ethernet/mellanox/mlx5/core/qp.c priv = container_of(table, struct mlx5_priv, qp_table); table 163 drivers/net/ethernet/mellanox/mlx5/core/qp.c common = mlx5_get_rsc(table, rsn); table 200 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_qp_table *table = &dev->priv.qp_table; table 204 drivers/net/ethernet/mellanox/mlx5/core/qp.c spin_lock_irq(&table->lock); table 205 drivers/net/ethernet/mellanox/mlx5/core/qp.c err = radix_tree_insert(&table->tree, table 208 drivers/net/ethernet/mellanox/mlx5/core/qp.c spin_unlock_irq(&table->lock); table 222 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_qp_table *table = &dev->priv.qp_table; table 225 drivers/net/ethernet/mellanox/mlx5/core/qp.c spin_lock_irqsave(&table->lock, flags); table 226 drivers/net/ethernet/mellanox/mlx5/core/qp.c radix_tree_delete(&table->tree, table 228 drivers/net/ethernet/mellanox/mlx5/core/qp.c spin_unlock_irqrestore(&table->lock, flags); table 526 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_qp_table *table = &dev->priv.qp_table; table 528 drivers/net/ethernet/mellanox/mlx5/core/qp.c memset(table, 0, sizeof(*table)); table 529 drivers/net/ethernet/mellanox/mlx5/core/qp.c spin_lock_init(&table->lock); table 530 drivers/net/ethernet/mellanox/mlx5/core/qp.c INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); table 533 drivers/net/ethernet/mellanox/mlx5/core/qp.c table->nb.notifier_call = rsc_event_notifier; table 534 
drivers/net/ethernet/mellanox/mlx5/core/qp.c mlx5_notifier_register(dev, &table->nb); table 539 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_qp_table *table = &dev->priv.qp_table; table 541 drivers/net/ethernet/mellanox/mlx5/core/qp.c mlx5_notifier_unregister(dev, &table->nb); table 727 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_qp_table *table = &dev->priv.qp_table; table 729 drivers/net/ethernet/mellanox/mlx5/core/qp.c return mlx5_get_rsc(table, rsn); table 109 drivers/net/ethernet/mellanox/mlx5/core/rl.c static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, table 116 drivers/net/ethernet/mellanox/mlx5/core/rl.c for (i = 0; i < table->max_size; i++) { table 117 drivers/net/ethernet/mellanox/mlx5/core/rl.c if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl)) table 118 drivers/net/ethernet/mellanox/mlx5/core/rl.c return &table->rl_entry[i]; table 119 drivers/net/ethernet/mellanox/mlx5/core/rl.c if (!empty_found && !table->rl_entry[i].rl.rate) { table 121 drivers/net/ethernet/mellanox/mlx5/core/rl.c ret_entry = &table->rl_entry[i]; table 146 drivers/net/ethernet/mellanox/mlx5/core/rl.c struct mlx5_rl_table *table = &dev->priv.rl_table; table 148 drivers/net/ethernet/mellanox/mlx5/core/rl.c return (rate <= table->max_rate && rate >= table->min_rate); table 164 drivers/net/ethernet/mellanox/mlx5/core/rl.c struct mlx5_rl_table *table = &dev->priv.rl_table; table 168 drivers/net/ethernet/mellanox/mlx5/core/rl.c mutex_lock(&table->rl_lock); table 172 drivers/net/ethernet/mellanox/mlx5/core/rl.c rl->rate, table->min_rate, table->max_rate); table 177 drivers/net/ethernet/mellanox/mlx5/core/rl.c entry = find_rl_entry(table, rl); table 180 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->max_size); table 202 drivers/net/ethernet/mellanox/mlx5/core/rl.c mutex_unlock(&table->rl_lock); table 209 drivers/net/ethernet/mellanox/mlx5/core/rl.c struct mlx5_rl_table *table = &dev->priv.rl_table; table 217 drivers/net/ethernet/mellanox/mlx5/core/rl.c mutex_lock(&table->rl_lock); table 218 drivers/net/ethernet/mellanox/mlx5/core/rl.c entry = find_rl_entry(table, rl); table 233 drivers/net/ethernet/mellanox/mlx5/core/rl.c mutex_unlock(&table->rl_lock); table 239 drivers/net/ethernet/mellanox/mlx5/core/rl.c struct mlx5_rl_table *table = &dev->priv.rl_table; table 242 drivers/net/ethernet/mellanox/mlx5/core/rl.c mutex_init(&table->rl_lock); table 244 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->max_size = 0; table 249 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1; table 250 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate); table 251 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate); table 253 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry), table 255 drivers/net/ethernet/mellanox/mlx5/core/rl.c if (!table->rl_entry) table 261 drivers/net/ethernet/mellanox/mlx5/core/rl.c for (i = 0; i < table->max_size; i++) table 262 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->rl_entry[i].index = i + 1; table 266 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->max_size, table 267 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->min_rate >> 10, table 268 drivers/net/ethernet/mellanox/mlx5/core/rl.c table->max_rate >> 10); table 275 drivers/net/ethernet/mellanox/mlx5/core/rl.c struct mlx5_rl_table *table = 
&dev->priv.rl_table; table 280 drivers/net/ethernet/mellanox/mlx5/core/rl.c for (i = 0; i < table->max_size; i++) table 281 drivers/net/ethernet/mellanox/mlx5/core/rl.c if (table->rl_entry[i].rl.rate) table 282 drivers/net/ethernet/mellanox/mlx5/core/rl.c mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index, table 51 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h int mlx5dr_table_destroy(struct mlx5dr_table *table); table 53 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h u32 mlx5dr_table_get_id(struct mlx5dr_table *table); table 56 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h mlx5dr_matcher_create(struct mlx5dr_table *table, table 75 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h mlx5dr_action_create_dest_table(struct mlx5dr_table *table); table 137 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; } table 140 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; } table 143 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h mlx5dr_matcher_create(struct mlx5dr_table *table, table 165 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; } table 1666 drivers/net/ethernet/micrel/ksz884x.c static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data) table 1671 drivers/net/ethernet/micrel/ksz884x.c ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr; table 1693 drivers/net/ethernet/micrel/ksz884x.c static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi, table 1699 drivers/net/ethernet/micrel/ksz884x.c ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr; table 176 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table) table 180 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c for (i = 0; i < table->count; i++) { table 181 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c table->max_index = max(table->max_index, table->ports[i].index); table 183 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c for (j = 0; j < table->count; j++) { table 184 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c if (table->ports[i].label_port != table 185 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c table->ports[j].label_port) table 187 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c table->ports[i].port_lanes += table->ports[j].lanes; table 191 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c if (table->ports[i].label_subport == table 192 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c table->ports[j].label_subport) table 195 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c table->ports[i].label_port, table 196 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c table->ports[i].label_subport); table 198 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c table->ports[i].is_split = true; table 248 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c struct nfp_eth_table *table; table 275 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c table = kzalloc(struct_size(table, ports, cnt), GFP_KERNEL); table 276 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c if (!table) table 279 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c table->count = cnt; table 283 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c 
&table->ports[j++]); table 285 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c nfp_eth_calc_port_geometry(cpp, table); table 286 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c for (i = 0; i < table->count; i++) table 287 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c nfp_eth_calc_port_type(cpp, &table->ports[i]); table 291 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c return table; table 1990 drivers/net/ethernet/qlogic/qed/qed_dcbx.c struct dcb_app *table) table 2002 drivers/net/ethernet/qlogic/qed/qed_dcbx.c table[i].selector = DCB_APP_IDTYPE_ETHTYPE; table 2004 drivers/net/ethernet/qlogic/qed/qed_dcbx.c table[i].selector = DCB_APP_IDTYPE_PORTNUM; table 2005 drivers/net/ethernet/qlogic/qed/qed_dcbx.c table[i].priority = dcbx_info->remote.params.app_entry[i].prio; table 2006 drivers/net/ethernet/qlogic/qed/qed_dcbx.c table[i].protocol = table 1040 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c struct dcb_app *table) table 1057 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c table[j].selector = app->selector; table 1058 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c table[j].priority = app->priority; table 1059 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c table[j++].protocol = app->protocol; table 4025 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, table 4028 drivers/net/ethernet/sfc/ef10.c return (struct efx_filter_spec *)(table->entry[filter_idx].spec & table 4033 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, table 4036 drivers/net/ethernet/sfc/ef10.c return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; table 4040 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, table 4045 drivers/net/ethernet/sfc/ef10.c table->entry[filter_idx].spec = (unsigned long)spec | flags; table 4273 drivers/net/ethernet/sfc/ef10.c static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table, table 4280 drivers/net/ethernet/sfc/ef10.c match_pri < table->rx_match_count; table 4282 drivers/net/ethernet/sfc/ef10.c if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) table 4294 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table; table 4308 drivers/net/ethernet/sfc/ef10.c table = efx->filter_state; table 4309 drivers/net/ethernet/sfc/ef10.c down_write(&table->lock); table 4318 drivers/net/ethernet/sfc/ef10.c rc = efx_ef10_filter_pri(table, spec); table 4350 drivers/net/ethernet/sfc/ef10.c saved_spec = efx_ef10_filter_entry_spec(table, i); table 4393 drivers/net/ethernet/sfc/ef10.c saved_spec = efx_ef10_filter_entry_spec(table, ins_index); table 4400 drivers/net/ethernet/sfc/ef10.c table->entry[ins_index].spec &= table 4406 drivers/net/ethernet/sfc/ef10.c priv_flags = efx_ef10_filter_entry_flags(table, ins_index); table 4416 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); table 4419 drivers/net/ethernet/sfc/ef10.c rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, table 4451 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); table 4467 drivers/net/ethernet/sfc/ef10.c saved_spec = efx_ef10_filter_entry_spec(table, i); table 4468 drivers/net/ethernet/sfc/ef10.c priv_flags = efx_ef10_filter_entry_flags(table, i); table 4474 drivers/net/ethernet/sfc/ef10.c table->entry[i].handle); table 4485 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_set_entry(table, i, 
saved_spec, table 4497 drivers/net/ethernet/sfc/ef10.c up_write(&table->lock); table 4531 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 4539 drivers/net/ethernet/sfc/ef10.c spec = efx_ef10_filter_entry_spec(table, filter_idx); table 4542 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_pri(table, spec) != table 4550 drivers/net/ethernet/sfc/ef10.c table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; table 4569 drivers/net/ethernet/sfc/ef10.c &table->entry[filter_idx].handle, table 4583 drivers/net/ethernet/sfc/ef10.c table->entry[filter_idx].handle); table 4590 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); table 4605 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table; table 4609 drivers/net/ethernet/sfc/ef10.c table = efx->filter_state; table 4610 drivers/net/ethernet/sfc/ef10.c down_write(&table->lock); table 4613 drivers/net/ethernet/sfc/ef10.c up_write(&table->lock); table 4623 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 4628 drivers/net/ethernet/sfc/ef10.c down_write(&table->lock); table 4631 drivers/net/ethernet/sfc/ef10.c up_write(&table->lock); table 4640 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table; table 4644 drivers/net/ethernet/sfc/ef10.c table = efx->filter_state; table 4645 drivers/net/ethernet/sfc/ef10.c down_read(&table->lock); table 4646 drivers/net/ethernet/sfc/ef10.c saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); table 4648 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_pri(table, saved_spec) == table 4655 drivers/net/ethernet/sfc/ef10.c up_read(&table->lock); table 4663 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table; table 4672 drivers/net/ethernet/sfc/ef10.c table = efx->filter_state; table 4673 drivers/net/ethernet/sfc/ef10.c down_write(&table->lock); table 4682 drivers/net/ethernet/sfc/ef10.c up_write(&table->lock); table 4690 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table; table 4695 drivers/net/ethernet/sfc/ef10.c table = efx->filter_state; table 4696 drivers/net/ethernet/sfc/ef10.c down_read(&table->lock); table 4698 drivers/net/ethernet/sfc/ef10.c if (table->entry[filter_idx].spec && table 4699 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_entry_spec(table, filter_idx)->priority == table 4703 drivers/net/ethernet/sfc/ef10.c up_read(&table->lock); table 4710 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 4712 drivers/net/ethernet/sfc/ef10.c return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2; table 4719 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table; table 4725 drivers/net/ethernet/sfc/ef10.c table = efx->filter_state; table 4726 drivers/net/ethernet/sfc/ef10.c down_read(&table->lock); table 4729 drivers/net/ethernet/sfc/ef10.c spec = efx_ef10_filter_entry_spec(table, filter_idx); table 4737 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_pri(table, spec), table 4741 drivers/net/ethernet/sfc/ef10.c up_read(&table->lock); table 4752 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table; table 4758 drivers/net/ethernet/sfc/ef10.c table = efx->filter_state; table 4759 drivers/net/ethernet/sfc/ef10.c down_write(&table->lock); table 4760 drivers/net/ethernet/sfc/ef10.c spec = efx_ef10_filter_entry_spec(table, filter_idx); table 4809 drivers/net/ethernet/sfc/ef10.c up_write(&table->lock); table 4875 drivers/net/ethernet/sfc/ef10.c struct 
efx_ef10_filter_table *table = efx->filter_state; table 4882 drivers/net/ethernet/sfc/ef10.c if (!table) table 4885 drivers/net/ethernet/sfc/ef10.c list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list) table 4889 drivers/net/ethernet/sfc/ef10.c static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, table 4897 drivers/net/ethernet/sfc/ef10.c match_pri < table->rx_match_count; table 4900 drivers/net/ethernet/sfc/ef10.c table->rx_match_mcdi_flags[match_pri]); table 4910 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table, table 4948 drivers/net/ethernet/sfc/ef10.c rc, table->rx_match_count); table 4949 drivers/net/ethernet/sfc/ef10.c table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags; table 4950 drivers/net/ethernet/sfc/ef10.c table->rx_match_count++; table 4961 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table; table 4971 drivers/net/ethernet/sfc/ef10.c table = kzalloc(sizeof(*table), GFP_KERNEL); table 4972 drivers/net/ethernet/sfc/ef10.c if (!table) table 4975 drivers/net/ethernet/sfc/ef10.c table->rx_match_count = 0; table 4976 drivers/net/ethernet/sfc/ef10.c rc = efx_ef10_filter_table_probe_matches(efx, table, false); table 4981 drivers/net/ethernet/sfc/ef10.c rc = efx_ef10_filter_table_probe_matches(efx, table, true); table 4985 drivers/net/ethernet/sfc/ef10.c !(efx_ef10_filter_match_supported(table, false, table 4987 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_match_supported(table, false, table 4996 drivers/net/ethernet/sfc/ef10.c table->entry = vzalloc(array_size(HUNT_FILTER_TBL_ROWS, table 4997 drivers/net/ethernet/sfc/ef10.c sizeof(*table->entry))); table 4998 drivers/net/ethernet/sfc/ef10.c if (!table->entry) { table 5003 drivers/net/ethernet/sfc/ef10.c table->mc_promisc_last = false; table 5004 drivers/net/ethernet/sfc/ef10.c table->vlan_filter = table 5006 drivers/net/ethernet/sfc/ef10.c INIT_LIST_HEAD(&table->vlan_list); table 5007 drivers/net/ethernet/sfc/ef10.c init_rwsem(&table->lock); table 5009 drivers/net/ethernet/sfc/ef10.c efx->filter_state = table; table 5023 drivers/net/ethernet/sfc/ef10.c kfree(table); table 5032 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5048 drivers/net/ethernet/sfc/ef10.c if (!table) table 5051 drivers/net/ethernet/sfc/ef10.c down_write(&table->lock); table 5055 drivers/net/ethernet/sfc/ef10.c spec = efx_ef10_filter_entry_spec(table, filter_idx); table 5061 drivers/net/ethernet/sfc/ef10.c while (match_pri < table->rx_match_count && table 5062 drivers/net/ethernet/sfc/ef10.c table->rx_match_mcdi_flags[match_pri] != mcdi_flags) table 5064 drivers/net/ethernet/sfc/ef10.c if (match_pri >= table->rx_match_count) { table 5090 drivers/net/ethernet/sfc/ef10.c &table->entry[filter_idx].handle, table 5097 drivers/net/ethernet/sfc/ef10.c list_for_each_entry(vlan, &table->vlan_list, list) table 5104 drivers/net/ethernet/sfc/ef10.c efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); table 5109 drivers/net/ethernet/sfc/ef10.c up_write(&table->lock); table 5128 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5145 drivers/net/ethernet/sfc/ef10.c if (!table) table 5149 drivers/net/ethernet/sfc/ef10.c spec = efx_ef10_filter_entry_spec(table, filter_idx); table 5158 drivers/net/ethernet/sfc/ef10.c table->entry[filter_idx].handle); table 5168 drivers/net/ethernet/sfc/ef10.c vfree(table->entry); table 5169 drivers/net/ethernet/sfc/ef10.c kfree(table); table 5174 
drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5177 drivers/net/ethernet/sfc/ef10.c efx_rwsem_assert_write_locked(&table->lock); table 5181 drivers/net/ethernet/sfc/ef10.c if (!table->entry[filter_idx].spec) table 5185 drivers/net/ethernet/sfc/ef10.c table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; table 5194 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5197 drivers/net/ethernet/sfc/ef10.c for (i = 0; i < table->dev_uc_count; i++) table 5199 drivers/net/ethernet/sfc/ef10.c for (i = 0; i < table->dev_mc_count; i++) table 5211 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5214 drivers/net/ethernet/sfc/ef10.c down_write(&table->lock); table 5215 drivers/net/ethernet/sfc/ef10.c list_for_each_entry(vlan, &table->vlan_list, list) table 5217 drivers/net/ethernet/sfc/ef10.c up_write(&table->lock); table 5222 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5227 drivers/net/ethernet/sfc/ef10.c table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); table 5228 drivers/net/ethernet/sfc/ef10.c ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); table 5232 drivers/net/ethernet/sfc/ef10.c table->uc_promisc = true; table 5235 drivers/net/ethernet/sfc/ef10.c ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); table 5239 drivers/net/ethernet/sfc/ef10.c table->dev_uc_count = i; table 5244 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5249 drivers/net/ethernet/sfc/ef10.c table->mc_overflow = false; table 5250 drivers/net/ethernet/sfc/ef10.c table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); table 5255 drivers/net/ethernet/sfc/ef10.c table->mc_promisc = true; table 5256 drivers/net/ethernet/sfc/ef10.c table->mc_overflow = true; table 5259 drivers/net/ethernet/sfc/ef10.c ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); table 5263 drivers/net/ethernet/sfc/ef10.c table->dev_mc_count = i; table 5270 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5281 drivers/net/ethernet/sfc/ef10.c addr_list = table->dev_mc_list; table 5282 drivers/net/ethernet/sfc/ef10.c addr_count = table->dev_mc_count; table 5285 drivers/net/ethernet/sfc/ef10.c addr_list = table->dev_uc_list; table 5286 drivers/net/ethernet/sfc/ef10.c addr_count = table->dev_uc_count; table 5497 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5503 drivers/net/ethernet/sfc/ef10.c down_write(&table->lock); table 5505 drivers/net/ethernet/sfc/ef10.c if (READ_ONCE(table->entry[i].spec) & table 5515 drivers/net/ethernet/sfc/ef10.c up_write(&table->lock); table 5599 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5605 drivers/net/ethernet/sfc/ef10.c if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter) table 5609 drivers/net/ethernet/sfc/ef10.c if (table->uc_promisc) { table 5644 drivers/net/ethernet/sfc/ef10.c table->mc_promisc_last != table->mc_promisc) table 5646 drivers/net/ethernet/sfc/ef10.c if (table->mc_promisc) { table 5667 drivers/net/ethernet/sfc/ef10.c if (!table->mc_overflow) table 5710 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5718 drivers/net/ethernet/sfc/ef10.c if (!table) table 5736 drivers/net/ethernet/sfc/ef10.c if (table->vlan_filter != vlan_filter) { 
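Several of the drivers/net/ethernet/sfc/ef10.c entries indexed above (source lines 4025 to 4045 and 5181 to 5185) depend on the filter table packing a spec pointer and per-entry flag bits into a single unsigned long per slot, which works because the spec allocations leave the low pointer bits free. The standalone sketch below illustrates that pointer-plus-flags packing; DEMO_FILTER_FLAG_*, struct demo_entry and the demo_* helpers are made-up names for illustration, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Low pointer bits reused as flags (spec allocations are at least 4-byte aligned). */
#define DEMO_FILTER_FLAG_BUSY		0x1UL
#define DEMO_FILTER_FLAG_AUTO_OLD	0x2UL
#define DEMO_FILTER_FLAGS		(DEMO_FILTER_FLAG_BUSY | DEMO_FILTER_FLAG_AUTO_OLD)

struct demo_spec {
	int priority;
};

struct demo_entry {
	unsigned long spec;		/* spec pointer | flag bits */
};

static void demo_set_entry(struct demo_entry *e, struct demo_spec *spec,
			   unsigned long flags)
{
	e->spec = (unsigned long)spec | flags;
}

static struct demo_spec *demo_entry_spec(const struct demo_entry *e)
{
	return (struct demo_spec *)(e->spec & ~DEMO_FILTER_FLAGS);
}

static unsigned long demo_entry_flags(const struct demo_entry *e)
{
	return e->spec & DEMO_FILTER_FLAGS;
}

int main(void)
{
	struct demo_spec *spec = calloc(1, sizeof(*spec));
	struct demo_entry entry;

	if (!spec)
		return 1;
	spec->priority = 2;
	demo_set_entry(&entry, spec, DEMO_FILTER_FLAG_BUSY);
	entry.spec |= DEMO_FILTER_FLAG_AUTO_OLD;	/* mark the slot stale, as the AUTO_OLD flag is used for */
	printf("priority=%d flags=%#lx\n",
	       demo_entry_spec(&entry)->priority, demo_entry_flags(&entry));
	free(spec);
	return 0;
}

The upshot is that clearing or testing a flag never dereferences the spec, and recovering the spec only needs a mask, which is why the listed lines manipulate entry[filter_idx].spec directly with bitwise operations.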
table 5737 drivers/net/ethernet/sfc/ef10.c table->vlan_filter = vlan_filter; table 5741 drivers/net/ethernet/sfc/ef10.c list_for_each_entry(vlan, &table->vlan_list, list) table 5745 drivers/net/ethernet/sfc/ef10.c table->mc_promisc_last = table->mc_promisc; table 5750 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5755 drivers/net/ethernet/sfc/ef10.c list_for_each_entry(vlan, &table->vlan_list, list) { table 5765 drivers/net/ethernet/sfc/ef10.c struct efx_ef10_filter_table *table = efx->filter_state; table 5792 drivers/net/ethernet/sfc/ef10.c list_add_tail(&vlan->list, &table->vlan_list); table 1819 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT]; table 1824 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table, table 1873 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 1878 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP]; table 1880 drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[EF4_FARCH_FILTER_TCP_FULL] + table 1883 drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[EF4_FARCH_FILTER_TCP_WILD] + table 1886 drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[EF4_FARCH_FILTER_UDP_FULL] + table 1889 drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[EF4_FARCH_FILTER_UDP_WILD] + table 1892 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC]; table 1893 drivers/net/ethernet/sfc/falcon/farch.c if (table->size) { table 1896 drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[EF4_FARCH_FILTER_MAC_FULL] + table 1900 drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[EF4_FARCH_FILTER_MAC_WILD] + table 1904 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF]; table 1905 drivers/net/ethernet/sfc/falcon/farch.c if (table->size) { table 1908 drivers/net/ethernet/sfc/falcon/farch.c table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); table 1911 drivers/net/ethernet/sfc/falcon/farch.c !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags & table 1915 drivers/net/ethernet/sfc/falcon/farch.c table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); table 1918 drivers/net/ethernet/sfc/falcon/farch.c !!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags & table 1927 drivers/net/ethernet/sfc/falcon/farch.c !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags & table 1928 drivers/net/ethernet/sfc/falcon/farch.c table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags & table 1947 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 1952 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC]; table 1953 drivers/net/ethernet/sfc/falcon/farch.c if (table->size) { table 1956 drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[EF4_FARCH_FILTER_MAC_FULL] + table 1960 drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[EF4_FARCH_FILTER_MAC_WILD] + table 2315 drivers/net/ethernet/sfc/falcon/farch.c if (state->table[table_id].size != 0) table 2317 drivers/net/ethernet/sfc/falcon/farch.c state->table[table_id].size; table 2328 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 2339 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[ef4_farch_filter_spec_table_id(&spec)]; table 2340 drivers/net/ethernet/sfc/falcon/farch.c if (table->size == 0) table 2345 
drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[spec.type]); table 2347 drivers/net/ethernet/sfc/falcon/farch.c if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) { table 2375 drivers/net/ethernet/sfc/falcon/farch.c unsigned int max_rep_depth = table->search_limit[spec.type]; table 2380 drivers/net/ethernet/sfc/falcon/farch.c unsigned int i = hash & (table->size - 1); table 2388 drivers/net/ethernet/sfc/falcon/farch.c if (!test_bit(i, table->used_bitmap)) { table 2392 drivers/net/ethernet/sfc/falcon/farch.c &table->spec[i])) { table 2411 drivers/net/ethernet/sfc/falcon/farch.c i = (i + incr) & (table->size - 1); table 2421 drivers/net/ethernet/sfc/falcon/farch.c &table->spec[rep_index]; table 2438 drivers/net/ethernet/sfc/falcon/farch.c __set_bit(ins_index, table->used_bitmap); table 2439 drivers/net/ethernet/sfc/falcon/farch.c ++table->used; table 2441 drivers/net/ethernet/sfc/falcon/farch.c table->spec[ins_index] = spec; table 2443 drivers/net/ethernet/sfc/falcon/farch.c if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) { table 2446 drivers/net/ethernet/sfc/falcon/farch.c if (table->search_limit[spec.type] < depth) { table 2447 drivers/net/ethernet/sfc/falcon/farch.c table->search_limit[spec.type] = depth; table 2455 drivers/net/ethernet/sfc/falcon/farch.c table->offset + table->step * ins_index); table 2461 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_filter_table_clear_entry(efx, table, table 2477 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table, table 2482 drivers/net/ethernet/sfc/falcon/farch.c EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); table 2483 drivers/net/ethernet/sfc/falcon/farch.c BUG_ON(table->offset == 0); /* can't clear MAC default filters */ table 2485 drivers/net/ethernet/sfc/falcon/farch.c __clear_bit(filter_idx, table->used_bitmap); table 2486 drivers/net/ethernet/sfc/falcon/farch.c --table->used; table 2487 drivers/net/ethernet/sfc/falcon/farch.c memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); table 2489 drivers/net/ethernet/sfc/falcon/farch.c ef4_writeo(efx, &filter, table->offset + table->step * filter_idx); table 2497 drivers/net/ethernet/sfc/falcon/farch.c if (unlikely(table->used == 0)) { table 2498 drivers/net/ethernet/sfc/falcon/farch.c memset(table->search_limit, 0, sizeof(table->search_limit)); table 2499 drivers/net/ethernet/sfc/falcon/farch.c if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC) table 2507 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table, table 2511 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_spec *spec = &table->spec[filter_idx]; table 2513 drivers/net/ethernet/sfc/falcon/farch.c if (!test_bit(filter_idx, table->used_bitmap) || table 2521 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_filter_table_clear_entry(efx, table, filter_idx); table 2533 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 2541 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[table_id]; table 2544 drivers/net/ethernet/sfc/falcon/farch.c if (filter_idx >= table->size) table 2546 drivers/net/ethernet/sfc/falcon/farch.c spec = &table->spec[filter_idx]; table 2549 drivers/net/ethernet/sfc/falcon/farch.c rc = ef4_farch_filter_remove(efx, table, filter_idx, priority); table 2561 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 2569 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[table_id]; table 2572 drivers/net/ethernet/sfc/falcon/farch.c if (filter_idx >= 
table->size) table 2574 drivers/net/ethernet/sfc/falcon/farch.c spec = &table->spec[filter_idx]; table 2578 drivers/net/ethernet/sfc/falcon/farch.c if (test_bit(filter_idx, table->used_bitmap) && table 2597 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table = &state->table[table_id]; table 2601 drivers/net/ethernet/sfc/falcon/farch.c for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { table 2602 drivers/net/ethernet/sfc/falcon/farch.c if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO) table 2603 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_filter_remove(efx, table, table 2626 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 2635 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[table_id]; table 2636 drivers/net/ethernet/sfc/falcon/farch.c for (filter_idx = 0; filter_idx < table->size; filter_idx++) { table 2637 drivers/net/ethernet/sfc/falcon/farch.c if (test_bit(filter_idx, table->used_bitmap) && table 2638 drivers/net/ethernet/sfc/falcon/farch.c table->spec[filter_idx].priority == priority) table 2654 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 2663 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[table_id]; table 2664 drivers/net/ethernet/sfc/falcon/farch.c for (filter_idx = 0; filter_idx < table->size; filter_idx++) { table 2665 drivers/net/ethernet/sfc/falcon/farch.c if (test_bit(filter_idx, table->used_bitmap) && table 2666 drivers/net/ethernet/sfc/falcon/farch.c table->spec[filter_idx].priority == priority) { table 2672 drivers/net/ethernet/sfc/falcon/farch.c &table->spec[filter_idx], filter_idx); table 2687 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 2694 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[table_id]; table 2697 drivers/net/ethernet/sfc/falcon/farch.c if (table->step == 0) table 2700 drivers/net/ethernet/sfc/falcon/farch.c for (filter_idx = 0; filter_idx < table->size; filter_idx++) { table 2701 drivers/net/ethernet/sfc/falcon/farch.c if (!test_bit(filter_idx, table->used_bitmap)) table 2703 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_filter_build(&filter, &table->spec[filter_idx]); table 2705 drivers/net/ethernet/sfc/falcon/farch.c table->offset + table->step * filter_idx); table 2721 drivers/net/ethernet/sfc/falcon/farch.c kfree(state->table[table_id].used_bitmap); table 2722 drivers/net/ethernet/sfc/falcon/farch.c vfree(state->table[table_id].spec); table 2730 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 2739 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP]; table 2740 drivers/net/ethernet/sfc/falcon/farch.c table->id = EF4_FARCH_FILTER_TABLE_RX_IP; table 2741 drivers/net/ethernet/sfc/falcon/farch.c table->offset = FR_BZ_RX_FILTER_TBL0; table 2742 drivers/net/ethernet/sfc/falcon/farch.c table->size = FR_BZ_RX_FILTER_TBL0_ROWS; table 2743 drivers/net/ethernet/sfc/falcon/farch.c table->step = FR_BZ_RX_FILTER_TBL0_STEP; table 2747 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[table_id]; table 2748 drivers/net/ethernet/sfc/falcon/farch.c if (table->size == 0) table 2750 drivers/net/ethernet/sfc/falcon/farch.c table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), table 2753 drivers/net/ethernet/sfc/falcon/farch.c if (!table->used_bitmap) table 2755 drivers/net/ethernet/sfc/falcon/farch.c table->spec = vzalloc(array_size(sizeof(*table->spec), table 2756 
drivers/net/ethernet/sfc/falcon/farch.c table->size)); table 2757 drivers/net/ethernet/sfc/falcon/farch.c if (!table->spec) table 2761 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF]; table 2762 drivers/net/ethernet/sfc/falcon/farch.c if (table->size) { table 2768 drivers/net/ethernet/sfc/falcon/farch.c spec = &table->spec[i]; table 2771 drivers/net/ethernet/sfc/falcon/farch.c __set_bit(i, table->used_bitmap); table 2789 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table; table 2798 drivers/net/ethernet/sfc/falcon/farch.c table = &state->table[table_id]; table 2800 drivers/net/ethernet/sfc/falcon/farch.c for (filter_idx = 0; filter_idx < table->size; filter_idx++) { table 2801 drivers/net/ethernet/sfc/falcon/farch.c if (!test_bit(filter_idx, table->used_bitmap) || table 2802 drivers/net/ethernet/sfc/falcon/farch.c table->spec[filter_idx].dmaq_id >= table 2807 drivers/net/ethernet/sfc/falcon/farch.c table->spec[filter_idx].flags |= table 2810 drivers/net/ethernet/sfc/falcon/farch.c table->spec[filter_idx].flags &= table 2817 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_filter_build(&filter, &table->spec[filter_idx]); table 2819 drivers/net/ethernet/sfc/falcon/farch.c table->offset + table->step * filter_idx); table 2840 drivers/net/ethernet/sfc/falcon/farch.c struct ef4_farch_filter_table *table = table 2841 drivers/net/ethernet/sfc/falcon/farch.c &state->table[EF4_FARCH_FILTER_TABLE_RX_IP]; table 2843 drivers/net/ethernet/sfc/falcon/farch.c if (test_bit(index, table->used_bitmap) && table 2844 drivers/net/ethernet/sfc/falcon/farch.c table->spec[index].priority == EF4_FILTER_PRI_HINT && table 2845 drivers/net/ethernet/sfc/falcon/farch.c rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, table 2847 drivers/net/ethernet/sfc/falcon/farch.c ef4_farch_filter_table_clear_entry(efx, table, index); table 364 drivers/net/ethernet/sfc/falcon/nic.c const struct ef4_nic_reg_table *table; table 374 drivers/net/ethernet/sfc/falcon/nic.c for (table = ef4_nic_reg_tables; table 375 drivers/net/ethernet/sfc/falcon/nic.c table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables); table 376 drivers/net/ethernet/sfc/falcon/nic.c table++) table 377 drivers/net/ethernet/sfc/falcon/nic.c if (efx->type->revision >= table->min_revision && table 378 drivers/net/ethernet/sfc/falcon/nic.c efx->type->revision <= table->max_revision) table 379 drivers/net/ethernet/sfc/falcon/nic.c len += table->rows * min_t(size_t, table->step, 16); table 387 drivers/net/ethernet/sfc/falcon/nic.c const struct ef4_nic_reg_table *table; table 399 drivers/net/ethernet/sfc/falcon/nic.c for (table = ef4_nic_reg_tables; table 400 drivers/net/ethernet/sfc/falcon/nic.c table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables); table 401 drivers/net/ethernet/sfc/falcon/nic.c table++) { table 404 drivers/net/ethernet/sfc/falcon/nic.c if (!(efx->type->revision >= table->min_revision && table 405 drivers/net/ethernet/sfc/falcon/nic.c efx->type->revision <= table->max_revision)) table 408 drivers/net/ethernet/sfc/falcon/nic.c size = min_t(size_t, table->step, 16); table 410 drivers/net/ethernet/sfc/falcon/nic.c for (i = 0; i < table->rows; i++) { table 411 drivers/net/ethernet/sfc/falcon/nic.c switch (table->step) { table 413 drivers/net/ethernet/sfc/falcon/nic.c ef4_readd(efx, buf, table->offset + 4 * i); table 417 drivers/net/ethernet/sfc/falcon/nic.c efx->membase + table->offset, table 421 drivers/net/ethernet/sfc/falcon/nic.c ef4_reado_table(efx, buf, 
table->offset, i); table 424 drivers/net/ethernet/sfc/falcon/nic.c ef4_reado_table(efx, buf, table->offset, 2 * i); table 1879 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; table 1884 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table, table 1933 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 1938 drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; table 1940 drivers/net/ethernet/sfc/farch.c table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + table 1943 drivers/net/ethernet/sfc/farch.c table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + table 1946 drivers/net/ethernet/sfc/farch.c table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + table 1949 drivers/net/ethernet/sfc/farch.c table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + table 1952 drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; table 1953 drivers/net/ethernet/sfc/farch.c if (table->size) { table 1956 drivers/net/ethernet/sfc/farch.c table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + table 1960 drivers/net/ethernet/sfc/farch.c table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + table 1964 drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; table 1965 drivers/net/ethernet/sfc/farch.c if (table->size) { table 1968 drivers/net/ethernet/sfc/farch.c table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); table 1971 drivers/net/ethernet/sfc/farch.c !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & table 1975 drivers/net/ethernet/sfc/farch.c table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); table 1978 drivers/net/ethernet/sfc/farch.c !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & table 1987 drivers/net/ethernet/sfc/farch.c !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & table 1988 drivers/net/ethernet/sfc/farch.c table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & table 2007 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2012 drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; table 2013 drivers/net/ethernet/sfc/farch.c if (table->size) { table 2016 drivers/net/ethernet/sfc/farch.c table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + table 2020 drivers/net/ethernet/sfc/farch.c table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + table 2374 drivers/net/ethernet/sfc/farch.c if (state->table[table_id].size != 0) table 2376 drivers/net/ethernet/sfc/farch.c state->table[table_id].size; table 2387 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2400 drivers/net/ethernet/sfc/farch.c table = &state->table[efx_farch_filter_spec_table_id(&spec)]; table 2401 drivers/net/ethernet/sfc/farch.c if (table->size == 0) { table 2408 drivers/net/ethernet/sfc/farch.c table->search_limit[spec.type]); table 2410 drivers/net/ethernet/sfc/farch.c if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { table 2436 drivers/net/ethernet/sfc/farch.c unsigned int max_rep_depth = table->search_limit[spec.type]; table 2441 drivers/net/ethernet/sfc/farch.c unsigned int i = hash & (table->size - 1); table 2447 drivers/net/ethernet/sfc/farch.c if (!test_bit(i, table->used_bitmap)) { table 2451 drivers/net/ethernet/sfc/farch.c &table->spec[i])) { table 2470 drivers/net/ethernet/sfc/farch.c i = (i + incr) & (table->size - 1); table 2480 drivers/net/ethernet/sfc/farch.c &table->spec[rep_index]; table 2497 drivers/net/ethernet/sfc/farch.c __set_bit(ins_index, table->used_bitmap); table 2498 
drivers/net/ethernet/sfc/farch.c ++table->used; table 2500 drivers/net/ethernet/sfc/farch.c table->spec[ins_index] = spec; table 2502 drivers/net/ethernet/sfc/farch.c if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { table 2505 drivers/net/ethernet/sfc/farch.c if (table->search_limit[spec.type] < depth) { table 2506 drivers/net/ethernet/sfc/farch.c table->search_limit[spec.type] = depth; table 2514 drivers/net/ethernet/sfc/farch.c table->offset + table->step * ins_index); table 2520 drivers/net/ethernet/sfc/farch.c efx_farch_filter_table_clear_entry(efx, table, table 2536 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table, table 2541 drivers/net/ethernet/sfc/farch.c EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); table 2542 drivers/net/ethernet/sfc/farch.c BUG_ON(table->offset == 0); /* can't clear MAC default filters */ table 2544 drivers/net/ethernet/sfc/farch.c __clear_bit(filter_idx, table->used_bitmap); table 2545 drivers/net/ethernet/sfc/farch.c --table->used; table 2546 drivers/net/ethernet/sfc/farch.c memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); table 2548 drivers/net/ethernet/sfc/farch.c efx_writeo(efx, &filter, table->offset + table->step * filter_idx); table 2556 drivers/net/ethernet/sfc/farch.c if (unlikely(table->used == 0)) { table 2557 drivers/net/ethernet/sfc/farch.c memset(table->search_limit, 0, sizeof(table->search_limit)); table 2558 drivers/net/ethernet/sfc/farch.c if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) table 2566 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table, table 2570 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; table 2572 drivers/net/ethernet/sfc/farch.c if (!test_bit(filter_idx, table->used_bitmap) || table 2580 drivers/net/ethernet/sfc/farch.c efx_farch_filter_table_clear_entry(efx, table, filter_idx); table 2592 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2600 drivers/net/ethernet/sfc/farch.c table = &state->table[table_id]; table 2603 drivers/net/ethernet/sfc/farch.c if (filter_idx >= table->size) table 2606 drivers/net/ethernet/sfc/farch.c spec = &table->spec[filter_idx]; table 2608 drivers/net/ethernet/sfc/farch.c rc = efx_farch_filter_remove(efx, table, filter_idx, priority); table 2620 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2630 drivers/net/ethernet/sfc/farch.c table = &state->table[table_id]; table 2633 drivers/net/ethernet/sfc/farch.c if (filter_idx >= table->size) table 2635 drivers/net/ethernet/sfc/farch.c spec = &table->spec[filter_idx]; table 2637 drivers/net/ethernet/sfc/farch.c if (test_bit(filter_idx, table->used_bitmap) && table 2654 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table = &state->table[table_id]; table 2658 drivers/net/ethernet/sfc/farch.c for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { table 2659 drivers/net/ethernet/sfc/farch.c if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) table 2660 drivers/net/ethernet/sfc/farch.c efx_farch_filter_remove(efx, table, table 2683 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2692 drivers/net/ethernet/sfc/farch.c table = &state->table[table_id]; table 2693 drivers/net/ethernet/sfc/farch.c for (filter_idx = 0; filter_idx < table->size; filter_idx++) { table 2694 drivers/net/ethernet/sfc/farch.c if (test_bit(filter_idx, table->used_bitmap) && table 2695 drivers/net/ethernet/sfc/farch.c table->spec[filter_idx].priority == 
priority) table 2711 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2720 drivers/net/ethernet/sfc/farch.c table = &state->table[table_id]; table 2721 drivers/net/ethernet/sfc/farch.c for (filter_idx = 0; filter_idx < table->size; filter_idx++) { table 2722 drivers/net/ethernet/sfc/farch.c if (test_bit(filter_idx, table->used_bitmap) && table 2723 drivers/net/ethernet/sfc/farch.c table->spec[filter_idx].priority == priority) { table 2729 drivers/net/ethernet/sfc/farch.c &table->spec[filter_idx], filter_idx); table 2744 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2751 drivers/net/ethernet/sfc/farch.c table = &state->table[table_id]; table 2754 drivers/net/ethernet/sfc/farch.c if (table->step == 0) table 2757 drivers/net/ethernet/sfc/farch.c for (filter_idx = 0; filter_idx < table->size; filter_idx++) { table 2758 drivers/net/ethernet/sfc/farch.c if (!test_bit(filter_idx, table->used_bitmap)) table 2760 drivers/net/ethernet/sfc/farch.c efx_farch_filter_build(&filter, &table->spec[filter_idx]); table 2762 drivers/net/ethernet/sfc/farch.c table->offset + table->step * filter_idx); table 2778 drivers/net/ethernet/sfc/farch.c kfree(state->table[table_id].used_bitmap); table 2779 drivers/net/ethernet/sfc/farch.c vfree(state->table[table_id].spec); table 2787 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2796 drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; table 2797 drivers/net/ethernet/sfc/farch.c table->id = EFX_FARCH_FILTER_TABLE_RX_IP; table 2798 drivers/net/ethernet/sfc/farch.c table->offset = FR_BZ_RX_FILTER_TBL0; table 2799 drivers/net/ethernet/sfc/farch.c table->size = FR_BZ_RX_FILTER_TBL0_ROWS; table 2800 drivers/net/ethernet/sfc/farch.c table->step = FR_BZ_RX_FILTER_TBL0_STEP; table 2802 drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; table 2803 drivers/net/ethernet/sfc/farch.c table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; table 2804 drivers/net/ethernet/sfc/farch.c table->offset = FR_CZ_RX_MAC_FILTER_TBL0; table 2805 drivers/net/ethernet/sfc/farch.c table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; table 2806 drivers/net/ethernet/sfc/farch.c table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; table 2808 drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; table 2809 drivers/net/ethernet/sfc/farch.c table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; table 2810 drivers/net/ethernet/sfc/farch.c table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; table 2812 drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; table 2813 drivers/net/ethernet/sfc/farch.c table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; table 2814 drivers/net/ethernet/sfc/farch.c table->offset = FR_CZ_TX_MAC_FILTER_TBL0; table 2815 drivers/net/ethernet/sfc/farch.c table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; table 2816 drivers/net/ethernet/sfc/farch.c table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; table 2819 drivers/net/ethernet/sfc/farch.c table = &state->table[table_id]; table 2820 drivers/net/ethernet/sfc/farch.c if (table->size == 0) table 2822 drivers/net/ethernet/sfc/farch.c table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), table 2825 drivers/net/ethernet/sfc/farch.c if (!table->used_bitmap) table 2827 drivers/net/ethernet/sfc/farch.c table->spec = vzalloc(array_size(sizeof(*table->spec), table 2828 drivers/net/ethernet/sfc/farch.c table->size)); table 2829 drivers/net/ethernet/sfc/farch.c if (!table->spec) table 2833 
drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; table 2834 drivers/net/ethernet/sfc/farch.c if (table->size) { table 2840 drivers/net/ethernet/sfc/farch.c spec = &table->spec[i]; table 2843 drivers/net/ethernet/sfc/farch.c __set_bit(i, table->used_bitmap); table 2861 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2870 drivers/net/ethernet/sfc/farch.c table = &state->table[table_id]; table 2872 drivers/net/ethernet/sfc/farch.c for (filter_idx = 0; filter_idx < table->size; filter_idx++) { table 2873 drivers/net/ethernet/sfc/farch.c if (!test_bit(filter_idx, table->used_bitmap) || table 2874 drivers/net/ethernet/sfc/farch.c table->spec[filter_idx].dmaq_id >= table 2879 drivers/net/ethernet/sfc/farch.c table->spec[filter_idx].flags |= table 2882 drivers/net/ethernet/sfc/farch.c table->spec[filter_idx].flags &= table 2889 drivers/net/ethernet/sfc/farch.c efx_farch_filter_build(&filter, &table->spec[filter_idx]); table 2891 drivers/net/ethernet/sfc/farch.c table->offset + table->step * filter_idx); table 2906 drivers/net/ethernet/sfc/farch.c struct efx_farch_filter_table *table; table 2912 drivers/net/ethernet/sfc/farch.c table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; table 2913 drivers/net/ethernet/sfc/farch.c if (test_bit(index, table->used_bitmap) && table 2914 drivers/net/ethernet/sfc/farch.c table->spec[index].priority == EFX_FILTER_PRI_HINT) { table 2918 drivers/net/ethernet/sfc/farch.c efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); table 2940 drivers/net/ethernet/sfc/farch.c efx_farch_filter_table_clear_entry(efx, table, index); table 371 drivers/net/ethernet/sfc/nic.c const struct efx_nic_reg_table *table; table 381 drivers/net/ethernet/sfc/nic.c for (table = efx_nic_reg_tables; table 382 drivers/net/ethernet/sfc/nic.c table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); table 383 drivers/net/ethernet/sfc/nic.c table++) table 384 drivers/net/ethernet/sfc/nic.c if (efx->type->revision >= table->min_revision && table 385 drivers/net/ethernet/sfc/nic.c efx->type->revision <= table->max_revision) table 386 drivers/net/ethernet/sfc/nic.c len += table->rows * min_t(size_t, table->step, 16); table 394 drivers/net/ethernet/sfc/nic.c const struct efx_nic_reg_table *table; table 406 drivers/net/ethernet/sfc/nic.c for (table = efx_nic_reg_tables; table 407 drivers/net/ethernet/sfc/nic.c table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); table 408 drivers/net/ethernet/sfc/nic.c table++) { table 411 drivers/net/ethernet/sfc/nic.c if (!(efx->type->revision >= table->min_revision && table 412 drivers/net/ethernet/sfc/nic.c efx->type->revision <= table->max_revision)) table 415 drivers/net/ethernet/sfc/nic.c size = min_t(size_t, table->step, 16); table 417 drivers/net/ethernet/sfc/nic.c for (i = 0; i < table->rows; i++) { table 418 drivers/net/ethernet/sfc/nic.c switch (table->step) { table 420 drivers/net/ethernet/sfc/nic.c efx_readd(efx, buf, table->offset + 4 * i); table 424 drivers/net/ethernet/sfc/nic.c efx->membase + table->offset, table 428 drivers/net/ethernet/sfc/nic.c efx_reado_table(efx, buf, table->offset, i); table 431 drivers/net/ethernet/sfc/nic.c efx_reado_table(efx, buf, table->offset, 2 * i); table 158 drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c clk_configs->m250_div.table = div_table; table 544 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c for (i = 0; i < ARRAY_SIZE(cfg->table); i++) { table 545 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c ret = 
dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]); table 128 drivers/net/ethernet/stmicro/stmmac/stmmac.h u32 table[STMMAC_RSS_MAX_TABLE_SIZE]; table 801 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c return ARRAY_SIZE(priv->rss.table); table 811 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) table 812 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c indir[i] = priv->rss.table[i]; table 833 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) table 834 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c priv->rss.table[i] = indir[i]; table 4597 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) table 4598 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); table 2367 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c const u32 *table) table 2373 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c tval = table[i]; table 488 drivers/net/ethernet/synopsys/dwc-xlgmac.h const u32 *table); table 1076 drivers/net/fddi/skfp/fplustm.c for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){ table 1107 drivers/net/fddi/skfp/fplustm.c for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){ table 1230 drivers/net/fddi/skfp/fplustm.c for (i = 0, tb = smc->hw.fp.mc.table; i < FPMAX_MULTICAST; i++, tb++) { table 222 drivers/net/fddi/skfp/h/fplustm.h } table[FPMAX_MULTICAST] ; table 1456 drivers/net/usb/aqc111.c #define AQC111_USB_ETH_DEV(vid, pid, table) \ table 1458 drivers/net/usb/aqc111.c .driver_info = (unsigned long)&(table) \ table 1465 drivers/net/usb/aqc111.c .driver_info = (unsigned long)&(table), table 383 drivers/net/wireless/ath/wcn36xx/smd.c &nv_d->table + fm_offset, table 105 drivers/net/wireless/ath/wcn36xx/wcn36xx.h u8 table; table 1147 drivers/net/wireless/ath/wil6210/wmi.h __le32 table; table 3940 drivers/net/wireless/ath/wil6210/wmi.h __le32 table; table 98 drivers/net/wireless/broadcom/b43/phy_a.h u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset); table 99 drivers/net/wireless/broadcom/b43/phy_a.h void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, table 101 drivers/net/wireless/broadcom/b43/phy_a.h u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset); table 102 drivers/net/wireless/broadcom/b43/phy_a.h void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, table 303 drivers/net/wireless/broadcom/b43/phy_g.c u16 table; table 310 drivers/net/wireless/broadcom/b43/phy_g.c table = B43_OFDMTAB_GAINX; table 312 drivers/net/wireless/broadcom/b43/phy_g.c table = B43_OFDMTAB_GAINX_R1; table 314 drivers/net/wireless/broadcom/b43/phy_g.c b43_ofdmtab_write16(dev, table, i, first); table 317 drivers/net/wireless/broadcom/b43/phy_g.c b43_ofdmtab_write16(dev, table, i, second); table 332 drivers/net/wireless/broadcom/b43/phy_g.c u16 table; table 340 drivers/net/wireless/broadcom/b43/phy_g.c table = B43_OFDMTAB_GAINX; table 342 drivers/net/wireless/broadcom/b43/phy_g.c table = B43_OFDMTAB_GAINX_R1; table 348 drivers/net/wireless/broadcom/b43/phy_g.c b43_ofdmtab_write16(dev, table, i, tmp); table 352 drivers/net/wireless/broadcom/b43/phy_g.c b43_ofdmtab_write16(dev, table, i, i - start); table 813 drivers/net/wireless/broadcom/b43/phy_g.c #define ofdmtab_stacksave(table, offset) \ table 815 drivers/net/wireless/broadcom/b43/phy_g.c _stack_save(stack, &stackidx, 0x3, (offset)|(table), \ table 816 
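Among the entries above, stmmac_main.c fills its RSS indirection table with ethtool_rxfh_indir_default(i, rxq). A short sketch of what that default distribution amounts to, assuming the helper's usual round-robin (index modulo RX queue count) behaviour; the helper is reimplemented locally so the example runs in userspace:

#include <stdio.h>

#define RSS_TABLE_SIZE 256   /* stand-in for STMMAC_RSS_MAX_TABLE_SIZE */

/* assumed equivalent of ethtool_rxfh_indir_default(): spread entries
 * round-robin across the available RX queues */
static unsigned int rxfh_indir_default(unsigned int index, unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int table[RSS_TABLE_SIZE];
	unsigned int rxq = 4;

	for (unsigned int i = 0; i < RSS_TABLE_SIZE; i++)
		table[i] = rxfh_indir_default(i, rxq);

	printf("entry 0 -> queue %u, entry 5 -> queue %u\n", table[0], table[5]);
	return 0;
}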
drivers/net/wireless/broadcom/b43/phy_g.c b43_ofdmtab_read16(dev, (table), (offset))); \ table 818 drivers/net/wireless/broadcom/b43/phy_g.c #define ofdmtab_stackrestore(table, offset) \ table 820 drivers/net/wireless/broadcom/b43/phy_g.c b43_ofdmtab_write16(dev, (table), (offset), \ table 822 drivers/net/wireless/broadcom/b43/phy_g.c (offset)|(table))); \ table 55 drivers/net/wireless/broadcom/b43/phy_g.h u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset); table 56 drivers/net/wireless/broadcom/b43/phy_g.h void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value); table 3790 drivers/net/wireless/broadcom/b43/phy_n.c const u32 *table = b43_nphy_get_tx_gain_table(dev); table 3792 drivers/net/wireless/broadcom/b43/phy_n.c if (!table) table 3794 drivers/net/wireless/broadcom/b43/phy_n.c txgain = *(table + txpi[i]); table 4221 drivers/net/wireless/broadcom/b43/phy_n.c const u32 *table = NULL; table 4227 drivers/net/wireless/broadcom/b43/phy_n.c table = b43_nphy_get_tx_gain_table(dev); table 4228 drivers/net/wireless/broadcom/b43/phy_n.c if (!table) table 4231 drivers/net/wireless/broadcom/b43/phy_n.c b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table); table 4232 drivers/net/wireless/broadcom/b43/phy_n.c b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table); table 4238 drivers/net/wireless/broadcom/b43/phy_n.c nphy->gmval = (table[0] >> 16) & 0x7000; table 4256 drivers/net/wireless/broadcom/b43/phy_n.c pga_gain = (table[i] >> 24) & 0xf; table 4257 drivers/net/wireless/broadcom/b43/phy_n.c pad_gain = (table[i] >> 19) & 0x1f; table 4263 drivers/net/wireless/broadcom/b43/phy_n.c pga_gain = (table[i] >> 24) & 0xF; table 5012 drivers/net/wireless/broadcom/b43/phy_n.c const u32 *table = NULL; table 5053 drivers/net/wireless/broadcom/b43/phy_n.c table = b43_nphy_get_tx_gain_table(dev); table 5054 drivers/net/wireless/broadcom/b43/phy_n.c if (!table) table 5058 drivers/net/wireless/broadcom/b43/phy_n.c target.ipa[i] = (table[index[i]] >> 16) & 0x7; table 5059 drivers/net/wireless/broadcom/b43/phy_n.c target.pad[i] = (table[index[i]] >> 19) & 0x1F; table 5060 drivers/net/wireless/broadcom/b43/phy_n.c target.pga[i] = (table[index[i]] >> 24) & 0xF; table 5061 drivers/net/wireless/broadcom/b43/phy_n.c target.txgm[i] = (table[index[i]] >> 28) & 0x7; table 5062 drivers/net/wireless/broadcom/b43/phy_n.c target.tx_lpf[i] = (table[index[i]] >> 31) & 0x1; table 5064 drivers/net/wireless/broadcom/b43/phy_n.c target.ipa[i] = (table[index[i]] >> 16) & 0xF; table 5065 drivers/net/wireless/broadcom/b43/phy_n.c target.pad[i] = (table[index[i]] >> 20) & 0xF; table 5066 drivers/net/wireless/broadcom/b43/phy_n.c target.pga[i] = (table[index[i]] >> 24) & 0xF; table 5067 drivers/net/wireless/broadcom/b43/phy_n.c target.txgm[i] = (table[index[i]] >> 28) & 0xF; table 5069 drivers/net/wireless/broadcom/b43/phy_n.c target.ipa[i] = (table[index[i]] >> 16) & 0x3; table 5070 drivers/net/wireless/broadcom/b43/phy_n.c target.pad[i] = (table[index[i]] >> 18) & 0x3; table 5071 drivers/net/wireless/broadcom/b43/phy_n.c target.pga[i] = (table[index[i]] >> 20) & 0x7; table 5072 drivers/net/wireless/broadcom/b43/phy_n.c target.txgm[i] = (table[index[i]] >> 23) & 0x7; table 5219 drivers/net/wireless/broadcom/b43/phy_n.c u16 *table = NULL; table 5228 drivers/net/wireless/broadcom/b43/phy_n.c table = nphy->cal_cache.txcal_coeffs_2G; table 5233 drivers/net/wireless/broadcom/b43/phy_n.c table = nphy->cal_cache.txcal_coeffs_5G; table 5275 drivers/net/wireless/broadcom/b43/phy_n.c 
b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 8, table); table 5289 drivers/net/wireless/broadcom/b43/phy_n.c u16 *table = NULL; table 5298 drivers/net/wireless/broadcom/b43/phy_n.c table = nphy->cal_cache.txcal_coeffs_2G; table 5303 drivers/net/wireless/broadcom/b43/phy_n.c table = nphy->cal_cache.txcal_coeffs_5G; table 5307 drivers/net/wireless/broadcom/b43/phy_n.c b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table); table 5311 drivers/net/wireless/broadcom/b43/phy_n.c table[i] = coef[i]; table 5382 drivers/net/wireless/broadcom/b43/phy_n.c const u16 *table; table 5449 drivers/net/wireless/broadcom/b43/phy_n.c table = nphy->mphase_txcal_bestcoeffs; table 5455 drivers/net/wireless/broadcom/b43/phy_n.c table = nphy->txiqlocal_bestc; table 5462 drivers/net/wireless/broadcom/b43/phy_n.c table = tbl_tx_iqlo_cal_startcoefs_nphyrev3; table 5465 drivers/net/wireless/broadcom/b43/phy_n.c table = tbl_tx_iqlo_cal_startcoefs; table 5471 drivers/net/wireless/broadcom/b43/phy_n.c b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table); table 527 drivers/net/wireless/broadcom/b43/radio_2057.c u16 *table = NULL; table 532 drivers/net/wireless/broadcom/b43/radio_2057.c table = r2057_rev4_init[0]; table 537 drivers/net/wireless/broadcom/b43/radio_2057.c table = r2057_rev5_init[0]; table 540 drivers/net/wireless/broadcom/b43/radio_2057.c table = r2057_rev7_init[0]; table 546 drivers/net/wireless/broadcom/b43/radio_2057.c table = r2057_rev5a_init[0]; table 552 drivers/net/wireless/broadcom/b43/radio_2057.c table = r2057_rev9_init[0]; table 558 drivers/net/wireless/broadcom/b43/radio_2057.c table = r2057_rev14_init[0]; table 564 drivers/net/wireless/broadcom/b43/radio_2057.c B43_WARN_ON(!table); table 566 drivers/net/wireless/broadcom/b43/radio_2057.c if (table) { table 567 drivers/net/wireless/broadcom/b43/radio_2057.c for (i = 0; i < size; i++, table += 2) table 568 drivers/net/wireless/broadcom/b43/radio_2057.c b43_radio_write(dev, table[0], table[1]); table 321 drivers/net/wireless/broadcom/b43/radio_2059.c u16 *table = NULL; table 326 drivers/net/wireless/broadcom/b43/radio_2059.c table = r2059_phy_rev1_init[0]; table 334 drivers/net/wireless/broadcom/b43/radio_2059.c for (i = 0; i < size; i++, table += 2) table 335 drivers/net/wireless/broadcom/b43/radio_2059.c b43_radio_write(dev, R2059_ALL | table[0], table[1]); table 366 drivers/net/wireless/broadcom/b43/tables.c u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset) table 371 drivers/net/wireless/broadcom/b43/tables.c addr = table + offset; table 386 drivers/net/wireless/broadcom/b43/tables.c void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, table 392 drivers/net/wireless/broadcom/b43/tables.c addr = table + offset; table 403 drivers/net/wireless/broadcom/b43/tables.c u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset) table 409 drivers/net/wireless/broadcom/b43/tables.c addr = table + offset; table 424 drivers/net/wireless/broadcom/b43/tables.c void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, table 430 drivers/net/wireless/broadcom/b43/tables.c addr = table + offset; table 443 drivers/net/wireless/broadcom/b43/tables.c u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset) table 445 drivers/net/wireless/broadcom/b43/tables.c b43_phy_write(dev, B43_PHY_GTABCTL, table + offset); table 449 drivers/net/wireless/broadcom/b43/tables.c void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value) table 451 drivers/net/wireless/broadcom/b43/tables.c b43_phy_write(dev, 
B43_PHY_GTABCTL, table + offset); table 2395 drivers/net/wireless/broadcom/b43/tables_lpphy.c struct lpphy_tx_gain_table_entry *table) table 2400 drivers/net/wireless/broadcom/b43/tables_lpphy.c lpphy_write_gain_table(dev, i, table[i]); table 10 drivers/net/wireless/broadcom/b43/tables_lpphy.h #define B43_LPTAB8(table, offset) (((table) << 10) | (offset) | B43_LPTAB_8BIT) table 11 drivers/net/wireless/broadcom/b43/tables_lpphy.h #define B43_LPTAB16(table, offset) (((table) << 10) | (offset) | B43_LPTAB_16BIT) table 12 drivers/net/wireless/broadcom/b43/tables_lpphy.h #define B43_LPTAB32(table, offset) (((table) << 10) | (offset) | B43_LPTAB_32BIT) table 39 drivers/net/wireless/broadcom/b43/tables_lpphy.h struct lpphy_tx_gain_table_entry *table); table 3277 drivers/net/wireless/broadcom/b43/tables_nphy.c #define check(table, size) \ table 3278 drivers/net/wireless/broadcom/b43/tables_nphy.c BUILD_BUG_ON(ARRAY_SIZE(b43_ntab_##table) != B43_NTAB_##size##_SIZE) table 78 drivers/net/wireless/broadcom/b43/tables_nphy.h #define B43_NTAB8(table, offset) (((table) << 10) | (offset) | B43_NTAB_8BIT) table 79 drivers/net/wireless/broadcom/b43/tables_nphy.h #define B43_NTAB16(table, offset) (((table) << 10) | (offset) | B43_NTAB_16BIT) table 80 drivers/net/wireless/broadcom/b43/tables_nphy.h #define B43_NTAB32(table, offset) (((table) << 10) | (offset) | B43_NTAB_32BIT) table 10 drivers/net/wireless/broadcom/b43/tables_phy_ht.h #define B43_HTTAB8(table, offset) (((table) << 10) | (offset) | B43_HTTAB_8BIT) table 11 drivers/net/wireless/broadcom/b43/tables_phy_ht.h #define B43_HTTAB16(table, offset) (((table) << 10) | (offset) | B43_HTTAB_16BIT) table 12 drivers/net/wireless/broadcom/b43/tables_phy_ht.h #define B43_HTTAB32(table, offset) (((table) << 10) | (offset) | B43_HTTAB_32BIT) table 10 drivers/net/wireless/broadcom/b43/tables_phy_lcn.h #define B43_LCNTAB8(table, offset) (((table) << 10) | (offset) | B43_LCNTAB_8BIT) table 11 drivers/net/wireless/broadcom/b43/tables_phy_lcn.h #define B43_LCNTAB16(table, offset) (((table) << 10) | (offset) | B43_LCNTAB_16BIT) table 12 drivers/net/wireless/broadcom/b43/tables_phy_lcn.h #define B43_LCNTAB32(table, offset) (((table) << 10) | (offset) | B43_LCNTAB_32BIT) table 6927 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c cc = &country_codes->table[i]; table 6941 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ccreq->rev = cpu_to_le32(country_codes->table[found_index].rev); table 6942 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c memcpy(ccreq->ccode, country_codes->table[found_index].cc, table 1008 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c struct in6_addr *table; table 1017 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c table = ifp->ipv6_addr_tbl; table 1019 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c if (ipv6_addr_equal(&ifa->addr, &table[i])) table 1026 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c table[ifp->ipv6addr_idx++] = ifa->addr; table 1029 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c table[i] = table[i + 1]; table 1030 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c table[NDOL_MAX_ENTRIES - 1] = ifa->addr; table 1037 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c table[i] = table[i + 1]; table 1038 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c memset(&table[i], 0, sizeof(table[i])); table 856 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c struct brcmf_fws_mac_descriptor *table; table 866 
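The B43_NTAB8/16/32, B43_LPTAB and B43_HTTAB macros indexed above all pack a table number, an offset and a width flag into a single value as (table << 10) | offset | WIDTH_FLAG. A small sketch of encoding and decoding that layout; the flag values below are illustrative, not the driver's real constants:

#include <stdio.h>

#define TAB_8BIT   0x10000000u   /* hypothetical width flags */
#define TAB_16BIT  0x20000000u
#define TAB_32BIT  0x40000000u
#define TAB(table, offset, width)  (((unsigned int)(table) << 10) | (offset) | (width))

int main(void)
{
	unsigned int id = TAB(26, 192, TAB_32BIT);  /* like B43_NTAB32(26, 192) */
	unsigned int table = (id >> 10) & 0x3ffffu; /* recover the table number */
	unsigned int offset = id & 0x3ffu;          /* offsets fit in 10 bits */

	printf("table %u offset %u width flag 0x%x\n",
	       table, offset, id & 0xf0000000u);
	return 0;
}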
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c table = &fws->desc.nodes[0]; table 868 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c brcmf_fws_macdesc_cleanup(fws, &table[i], ifidx); table 1344 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c struct brcmf_fws_mac_descriptor *table; table 1353 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c table = (struct brcmf_fws_mac_descriptor *)&fws->desc; table 1358 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c entry = &table[(node_pos + i) % num_nodes]; table 233 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h uint table; table 2318 drivers/net/wireless/intel/iwlegacy/3945.c struct il3945_rate_scaling_info *table = rate_cmd.table; table 2323 drivers/net/wireless/intel/iwlegacy/3945.c table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp); table 2324 drivers/net/wireless/intel/iwlegacy/3945.c table[idx].try_cnt = il->retry_rate; table 2326 drivers/net/wireless/intel/iwlegacy/3945.c table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx; table 2335 drivers/net/wireless/intel/iwlegacy/3945.c table[i].next_rate_idx = table 2339 drivers/net/wireless/intel/iwlegacy/3945.c table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL; table 2342 drivers/net/wireless/intel/iwlegacy/3945.c table[RATE_6M_IDX_TBL].next_rate_idx = table 2356 drivers/net/wireless/intel/iwlegacy/3945.c table[i].next_rate_idx = table 2361 drivers/net/wireless/intel/iwlegacy/3945.c table[idx].next_rate_idx = RATE_5M_IDX_TBL; table 408 drivers/net/wireless/intel/iwlegacy/4965-calib.c il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]); table 415 drivers/net/wireless/intel/iwlegacy/4965-calib.c (&cmd.table[0], &(il->sensitivity_tbl[0]), table 422 drivers/net/wireless/intel/iwlegacy/4965-calib.c memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]), table 799 drivers/net/wireless/intel/iwlegacy/4965-rs.c struct il_link_quality_cmd *table; table 836 drivers/net/wireless/intel/iwlegacy/4965-rs.c table = &lq_sta->lq; table 837 drivers/net/wireless/intel/iwlegacy/4965-rs.c tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); table 918 drivers/net/wireless/intel/iwlegacy/4965-rs.c tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); table 944 drivers/net/wireless/intel/iwlegacy/4965-rs.c tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); table 1778 drivers/net/wireless/intel/iwlegacy/commands.h struct il3945_rate_scaling_info table[IL_MAX_RATES]; table 3220 drivers/net/wireless/intel/iwlegacy/commands.h __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */ table 481 drivers/net/wireless/intel/iwlwifi/dvm/calib.c iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); table 487 drivers/net/wireless/intel/iwlwifi/dvm/calib.c if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), table 494 drivers/net/wireless/intel/iwlwifi/dvm/calib.c memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), table 3048 drivers/net/wireless/intel/iwlwifi/dvm/commands.h __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ table 1625 drivers/net/wireless/intel/iwlwifi/dvm/main.c struct iwl_error_event_table table; table 1646 drivers/net/wireless/intel/iwlwifi/dvm/main.c iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); table 1648 drivers/net/wireless/intel/iwlwifi/dvm/main.c if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { table 1651 drivers/net/wireless/intel/iwlwifi/dvm/main.c priv->status, table.valid); table 1654 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | 
%-28s\n", table.error_id, table 1655 drivers/net/wireless/intel/iwlwifi/dvm/main.c desc_lookup(table.error_id)); table 1656 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | uPc\n", table.pc); table 1657 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1); table 1658 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2); table 1659 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1); table 1660 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2); table 1661 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | data1\n", table.data1); table 1662 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | data2\n", table.data2); table 1663 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | line\n", table.line); table 1664 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time); table 1665 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low); table 1666 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi); table 1667 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1); table 1668 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2); table 1669 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3); table 1670 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver); table 1671 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver); table 1672 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver); table 1673 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd); table 1674 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | isr0\n", table.isr0); table 1675 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | isr1\n", table.isr1); table 1676 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | isr2\n", table.isr2); table 1677 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | isr3\n", table.isr3); table 1678 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | isr4\n", table.isr4); table 1679 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref); table 1680 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event); table 1681 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control); table 1682 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration); table 1683 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); table 1684 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); table 1685 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); table 1686 drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp); table 1687 
drivers/net/wireless/intel/iwlwifi/dvm/main.c IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler); table 149 drivers/net/wireless/intel/iwlwifi/dvm/power.c const struct iwl_power_vec_entry *table; table 156 drivers/net/wireless/intel/iwlwifi/dvm/power.c table = apm_range_2; table 158 drivers/net/wireless/intel/iwlwifi/dvm/power.c table = apm_range_1; table 160 drivers/net/wireless/intel/iwlwifi/dvm/power.c table = apm_range_0; table 162 drivers/net/wireless/intel/iwlwifi/dvm/power.c table = range_2; table 164 drivers/net/wireless/intel/iwlwifi/dvm/power.c table = range_1; table 166 drivers/net/wireless/intel/iwlwifi/dvm/power.c table = range_0; table 172 drivers/net/wireless/intel/iwlwifi/dvm/power.c *cmd = table[lvl].cmd; table 181 drivers/net/wireless/intel/iwlwifi/dvm/power.c skip = table[lvl].no_dtim; table 884 drivers/net/wireless/intel/iwlwifi/dvm/rs.c struct iwl_link_quality_cmd *table; table 924 drivers/net/wireless/intel/iwlwifi/dvm/rs.c table = &lq_sta->lq; table 925 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); table 1004 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); table 1029 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); table 429 drivers/net/wireless/intel/iwlwifi/fw/api/power.h struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; table 440 drivers/net/wireless/intel/iwlwifi/fw/api/power.h struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; table 631 drivers/net/wireless/intel/iwlwifi/fw/dbg.c struct scatterlist *table; table 634 drivers/net/wireless/intel/iwlwifi/fw/dbg.c table = kcalloc(nents, sizeof(*table), GFP_KERNEL); table 635 drivers/net/wireless/intel/iwlwifi/fw/dbg.c if (!table) table 637 drivers/net/wireless/intel/iwlwifi/fw/dbg.c sg_init_table(table, nents); table 638 drivers/net/wireless/intel/iwlwifi/fw/dbg.c iter = table; table 639 drivers/net/wireless/intel/iwlwifi/fw/dbg.c for_each_sg(table, iter, sg_nents(table), i) { table 643 drivers/net/wireless/intel/iwlwifi/fw/dbg.c iter = table; table 644 drivers/net/wireless/intel/iwlwifi/fw/dbg.c for_each_sg(table, iter, sg_nents(table), i) { table 649 drivers/net/wireless/intel/iwlwifi/fw/dbg.c kfree(table); table 656 drivers/net/wireless/intel/iwlwifi/fw/dbg.c return table; table 681 drivers/net/wireless/intel/iwlwifi/mvm/fw.c union acpi_object *table, table 690 drivers/net/wireless/intel/iwlwifi/mvm/fw.c if ((table[i].type != ACPI_TYPE_INTEGER) || table 691 drivers/net/wireless/intel/iwlwifi/mvm/fw.c (table[i].integer.value > U8_MAX)) table 694 drivers/net/wireless/intel/iwlwifi/mvm/fw.c profile->table[i] = table[i].integer.value; table 702 drivers/net/wireless/intel/iwlwifi/mvm/fw.c union acpi_object *wifi_pkg, *table, *data; table 726 drivers/net/wireless/intel/iwlwifi/mvm/fw.c table = &wifi_pkg->package.elements[2]; table 731 drivers/net/wireless/intel/iwlwifi/mvm/fw.c ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0], table 896 drivers/net/wireless/intel/iwlwifi/mvm/fw.c cpu_to_le16(prof->table[idx]); table 898 drivers/net/wireless/intel/iwlwifi/mvm/fw.c j, prof->table[idx]); table 1005 drivers/net/wireless/intel/iwlwifi/mvm/fw.c (struct iwl_per_chain_offset *)&cmd.table[i]; table 779 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h u8 table[ACPI_SAR_TABLE_SIZE]; table 3071 drivers/net/wireless/intel/iwlwifi/mvm/rs.c struct iwl_lq_cmd *table; table 3149 drivers/net/wireless/intel/iwlwifi/mvm/rs.c table = 
&lq_sta->lq; table 3150 drivers/net/wireless/intel/iwlwifi/mvm/rs.c lq_hwrate = le32_to_cpu(table->rs_table[0]); table 3157 drivers/net/wireless/intel/iwlwifi/mvm/rs.c if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { table 3160 drivers/net/wireless/intel/iwlwifi/mvm/rs.c lq_color, LQ_FLAG_COLOR_GET(table->flags)); table 3254 drivers/net/wireless/intel/iwlwifi/mvm/rs.c lq_hwrate = le32_to_cpu(table->rs_table[i]); table 465 drivers/net/wireless/intel/iwlwifi/mvm/utils.c struct iwl_umac_error_event_table table; table 473 drivers/net/wireless/intel/iwlwifi/mvm/utils.c iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); table 475 drivers/net/wireless/intel/iwlwifi/mvm/utils.c if (table.valid) table 476 drivers/net/wireless/intel/iwlwifi/mvm/utils.c mvm->fwrt.dump.umac_err_id = table.error_id; table 478 drivers/net/wireless/intel/iwlwifi/mvm/utils.c if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { table 481 drivers/net/wireless/intel/iwlwifi/mvm/utils.c mvm->status, table.valid); table 484 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | %s\n", table.error_id, table 485 drivers/net/wireless/intel/iwlwifi/mvm/utils.c desc_lookup(table.error_id)); table 486 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); table 487 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); table 488 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); table 489 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2); table 490 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); table 491 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); table 492 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); table 493 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major); table 494 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor); table 495 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); table 496 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); table 497 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); table 498 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); table 504 drivers/net/wireless/intel/iwlwifi/mvm/utils.c struct iwl_error_event_table table; table 539 drivers/net/wireless/intel/iwlwifi/mvm/utils.c iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); table 541 drivers/net/wireless/intel/iwlwifi/mvm/utils.c if (table.valid) table 542 drivers/net/wireless/intel/iwlwifi/mvm/utils.c mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id; table 544 drivers/net/wireless/intel/iwlwifi/mvm/utils.c if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { table 547 drivers/net/wireless/intel/iwlwifi/mvm/utils.c mvm->status, table.valid); table 554 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, table 555 drivers/net/wireless/intel/iwlwifi/mvm/utils.c desc_lookup(table.error_id)); table 556 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 
IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); table 557 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); table 558 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); table 559 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); table 560 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); table 561 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | data1\n", table.data1); table 562 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | data2\n", table.data2); table 563 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | data3\n", table.data3); table 564 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); table 565 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); table 566 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); table 567 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); table 568 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); table 569 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type); table 570 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); table 571 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); table 572 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); table 573 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); table 574 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); table 575 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); table 576 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); table 577 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); table 578 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); table 579 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); table 580 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id); table 581 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); table 582 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); table 583 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); table 584 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); table 585 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); table 586 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); table 587 drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); table 588 
drivers/net/wireless/intel/iwlwifi/mvm/utils.c IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); table 1390 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h struct rtl8xxxu_rfregval *table, table 2386 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c struct rtl8xxxu_rfregval *table, table 2439 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c rtl8xxxu_init_rf_regs(priv, table, path); table 174 drivers/net/wireless/realtek/rtw88/phy.c u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100}; table 180 drivers/net/wireless/realtek/rtw88/phy.c table[i] += RA_FLOOR_UP_GAP; table 183 drivers/net/wireless/realtek/rtw88/phy.c if (rssi < table[i]) { table 501 drivers/net/wireless/ti/wl1251/acx.c ie_table->table[idx++] = BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN; table 502 drivers/net/wireless/ti/wl1251/acx.c ie_table->table[idx++] = BEACON_RULE_PASS_ON_APPEARANCE; table 490 drivers/net/wireless/ti/wl1251/acx.h u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; table 1781 drivers/net/wireless/ti/wl12xx/main.c static int wl12xx_get_clock_idx(const struct wl12xx_clock *table, table 1786 drivers/net/wireless/ti/wl12xx/main.c for (i = 0; table[i].freq != 0; i++) table 1787 drivers/net/wireless/ti/wl12xx/main.c if ((table[i].freq == freq) && (table[i].xtal == xtal)) table 1788 drivers/net/wireless/ti/wl12xx/main.c return table[i].hw_idx; table 401 drivers/net/wireless/ti/wlcore/acx.c ie_table->table[idx++] = r->ie; table 402 drivers/net/wireless/ti/wlcore/acx.c ie_table->table[idx++] = r->rule; table 411 drivers/net/wireless/ti/wlcore/acx.c memcpy(&(ie_table->table[idx]), r->oui, table 414 drivers/net/wireless/ti/wlcore/acx.c ie_table->table[idx++] = r->type; table 415 drivers/net/wireless/ti/wlcore/acx.c memcpy(&(ie_table->table[idx]), r->version, table 268 drivers/net/wireless/ti/wlcore/acx.h u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; table 2270 drivers/nvme/host/core.c struct nvme_feat_auto_pst *table; table 2287 drivers/nvme/host/core.c table = kzalloc(sizeof(*table), GFP_KERNEL); table 2288 drivers/nvme/host/core.c if (!table) table 2309 drivers/nvme/host/core.c table->entries[state] = target; table 2361 drivers/nvme/host/core.c max_ps, max_lat_us, (int)sizeof(*table), table); table 2366 drivers/nvme/host/core.c table, sizeof(*table), NULL); table 2370 drivers/nvme/host/core.c kfree(table); table 239 drivers/nvmem/core.c struct nvmem_cell_table *table; table 244 drivers/nvmem/core.c list_for_each_entry(table, &nvmem_cell_tables, node) { table 245 drivers/nvmem/core.c if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) { table 246 drivers/nvmem/core.c for (i = 0; i < table->ncells; i++) { table 247 drivers/nvmem/core.c info = &table->cells[i]; table 1278 drivers/nvmem/core.c void nvmem_add_cell_table(struct nvmem_cell_table *table) table 1281 drivers/nvmem/core.c list_add_tail(&table->node, &nvmem_cell_tables); table 1291 drivers/nvmem/core.c void nvmem_del_cell_table(struct nvmem_cell_table *table) table 1294 drivers/nvmem/core.c list_del(&table->node); table 1239 drivers/opp/core.c struct dev_pm_opp *_opp_allocate(struct opp_table *table) table 1245 drivers/opp/core.c count = table->regulator_count > 0 ? 
table->regulator_count : 1; table 44 drivers/opp/cpu.c struct cpufreq_frequency_table **table) table 79 drivers/opp/cpu.c *table = &freq_table[0]; table 97 drivers/opp/cpu.c struct cpufreq_frequency_table **table) table 99 drivers/opp/cpu.c if (!table) table 102 drivers/opp/cpu.c kfree(*table); table 103 drivers/opp/cpu.c *table = NULL; table 82 drivers/opp/ti-opp-supply.c struct ti_opp_supply_optimum_voltage_table *table; table 125 drivers/opp/ti-opp-supply.c table = kcalloc(data->num_vdd_table, sizeof(*data->vdd_table), table 127 drivers/opp/ti-opp-supply.c if (!table) { table 131 drivers/opp/ti-opp-supply.c data->vdd_table = table; table 134 drivers/opp/ti-opp-supply.c for (i = 0; i < data->num_vdd_table; i++, table++) { table 138 drivers/opp/ti-opp-supply.c table->reference_uv = be32_to_cpup(val++); table 145 drivers/opp/ti-opp-supply.c table->optimized_uv = of_data->efuse_voltage_uv ? tmp : table 149 drivers/opp/ti-opp-supply.c i, efuse_offset, table->reference_uv, table 150 drivers/opp/ti-opp-supply.c table->optimized_uv); table 157 drivers/opp/ti-opp-supply.c if (!table->optimized_uv) { table 159 drivers/opp/ti-opp-supply.c i, efuse_offset, table->reference_uv); table 160 drivers/opp/ti-opp-supply.c table->optimized_uv = table->reference_uv; table 196 drivers/opp/ti-opp-supply.c struct ti_opp_supply_optimum_voltage_table *table; table 201 drivers/opp/ti-opp-supply.c table = data->vdd_table; table 202 drivers/opp/ti-opp-supply.c if (!table) table 206 drivers/opp/ti-opp-supply.c for (i = 0; i < data->num_vdd_table; i++, table++) table 207 drivers/opp/ti-opp-supply.c if (table->reference_uv == reference_uv) table 208 drivers/opp/ti-opp-supply.c return table->optimized_uv; table 267 drivers/parisc/iosapic.c struct irt_entry *table; /* start of interrupt routing tbl */ table 287 drivers/parisc/iosapic.c table = iosapic_alloc_irt(num_entries); table 288 drivers/parisc/iosapic.c if (table == NULL) { table 295 drivers/parisc/iosapic.c status = pdc_pat_get_irt(table, cell_num); table 318 drivers/parisc/iosapic.c table = iosapic_alloc_irt(num_entries); table 319 drivers/parisc/iosapic.c if (!table) { table 326 drivers/parisc/iosapic.c status = pdc_pci_irt(num_entries, 0, table); table 331 drivers/parisc/iosapic.c *irt = table; table 335 drivers/parisc/iosapic.c struct irt_entry *p = table; table 340 drivers/parisc/iosapic.c table, table 36 drivers/parport/procfs.c static int do_active_device(struct ctl_table *table, int write, table 39 drivers/parport/procfs.c struct parport *port = (struct parport *)table->extra1; table 73 drivers/parport/procfs.c static int do_autoprobe(struct ctl_table *table, int write, table 76 drivers/parport/procfs.c struct parport_device_info *info = table->extra2; table 115 drivers/parport/procfs.c static int do_hardware_base_addr(struct ctl_table *table, int write, table 119 drivers/parport/procfs.c struct parport *port = (struct parport *)table->extra1; table 143 drivers/parport/procfs.c static int do_hardware_irq(struct ctl_table *table, int write, table 147 drivers/parport/procfs.c struct parport *port = (struct parport *)table->extra1; table 171 drivers/parport/procfs.c static int do_hardware_dma(struct ctl_table *table, int write, table 175 drivers/parport/procfs.c struct parport *port = (struct parport *)table->extra1; table 199 drivers/parport/procfs.c static int do_hardware_modes(struct ctl_table *table, int write, table 203 drivers/parport/procfs.c struct parport *port = (struct parport *)table->extra1; table 124 drivers/pci/hotplug/acpiphp_ibm.c char 
*table; table 126 drivers/pci/hotplug/acpiphp_ibm.c size = ibm_get_table_from_acpi(&table); table 129 drivers/pci/hotplug/acpiphp_ibm.c des = (union apci_descriptor *)table; table 133 drivers/pci/hotplug/acpiphp_ibm.c des = (union apci_descriptor *)&table[ind += des->header.len]; table 136 drivers/pci/hotplug/acpiphp_ibm.c des = (union apci_descriptor *)&table[ind += des->generic.len]; table 148 drivers/pci/hotplug/acpiphp_ibm.c kfree(table); table 360 drivers/pci/hotplug/acpiphp_ibm.c char *table = NULL; table 365 drivers/pci/hotplug/acpiphp_ibm.c bytes_read = ibm_get_table_from_acpi(&table); table 367 drivers/pci/hotplug/acpiphp_ibm.c memcpy(buffer, table, bytes_read); table 368 drivers/pci/hotplug/acpiphp_ibm.c kfree(table); table 517 drivers/perf/xgene_pmu.c XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a), table 518 drivers/perf/xgene_pmu.c XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b), table 519 drivers/perf/xgene_pmu.c XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c), table 149 drivers/phy/broadcom/phy-brcm-usb.c static int name_to_value(struct value_to_name_map *table, int count, table 156 drivers/phy/broadcom/phy-brcm-usb.c if (sysfs_streq(name, table[x].name)) { table 164 drivers/phy/broadcom/phy-brcm-usb.c static const char *value_to_name(struct value_to_name_map *table, int count, table 169 drivers/phy/broadcom/phy-brcm-usb.c return table[value].name; table 1469 drivers/pinctrl/intel/pinctrl-intel.c const struct intel_pinctrl_soc_data **table; table 1477 drivers/pinctrl/intel/pinctrl-intel.c table = (const struct intel_pinctrl_soc_data **)match; table 1478 drivers/pinctrl/intel/pinctrl-intel.c for (i = 0; table[i]; i++) { table 1479 drivers/pinctrl/intel/pinctrl-intel.c if (!strcmp(adev->pnp.unique_id, table[i]->uid)) { table 1480 drivers/pinctrl/intel/pinctrl-intel.c data = table[i]; table 1491 drivers/pinctrl/intel/pinctrl-intel.c table = (const struct intel_pinctrl_soc_data **)id->driver_data; table 1492 drivers/pinctrl/intel/pinctrl-intel.c data = table[pdev->id]; table 505 drivers/pinctrl/pinctrl-rza1.c const struct rza1_bidir_entry *table) table 507 drivers/pinctrl/pinctrl-rza1.c const struct rza1_bidir_entry *entry = &table[port]; table 523 drivers/pinctrl/pinctrl-rza1.c const struct rza1_swio_entry *table) table 529 drivers/pinctrl/pinctrl-rza1.c for (i = 0; i < table->npins; ++i) { table 530 drivers/pinctrl/pinctrl-rza1.c swio_pin = &table->pins[i]; table 358 drivers/platform/x86/dell-smbios-base.c struct calling_interface_structure *table = table 369 drivers/platform/x86/dell-smbios-base.c da_supported_commands = table->supportedCmds; table 379 drivers/platform/x86/dell-smbios-base.c memcpy(da_tokens+da_num_tokens, table->tokens, table 60 drivers/platform/x86/dell-smbios-smm.c struct calling_interface_structure *table = table 69 drivers/platform/x86/dell-smbios-smm.c da_command_address = table->cmdIOAddress; table 70 drivers/platform/x86/dell-smbios-smm.c da_command_code = table->cmdIOCode; table 444 drivers/platform/x86/dell-wmi.c struct dell_bios_hotkey_table *table; table 455 drivers/platform/x86/dell-wmi.c table = container_of(dm, struct dell_bios_hotkey_table, header); table 457 drivers/platform/x86/dell-wmi.c hotkey_num = (table->header.length - table 479 drivers/platform/x86/dell-wmi.c &table->keymap[i]; table 91 drivers/platform/x86/pcengines-apuv2.c .table = { table 126 drivers/platform/x86/pcengines-apuv2.c .table = { table 645 drivers/power/supply/power_supply_core.c struct power_supply_battery_ocv_table *table; table 664 
drivers/power/supply/power_supply_core.c table = info->ocv_table[index] = table 665 drivers/power/supply/power_supply_core.c devm_kcalloc(&psy->dev, tab_len, sizeof(*table), GFP_KERNEL); table 673 drivers/power/supply/power_supply_core.c table[i].ocv = be32_to_cpu(*list); table 675 drivers/power/supply/power_supply_core.c table[i].capacity = be32_to_cpu(*list); table 710 drivers/power/supply/power_supply_core.c int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, table 716 drivers/power/supply/power_supply_core.c if (ocv > table[i].ocv) table 720 drivers/power/supply/power_supply_core.c tmp = (table[i - 1].capacity - table[i].capacity) * table 721 drivers/power/supply/power_supply_core.c (ocv - table[i].ocv); table 722 drivers/power/supply/power_supply_core.c tmp /= table[i - 1].ocv - table[i].ocv; table 723 drivers/power/supply/power_supply_core.c cap = tmp + table[i].capacity; table 725 drivers/power/supply/power_supply_core.c cap = table[0].capacity; table 727 drivers/power/supply/power_supply_core.c cap = table[table_len - 1].capacity; table 761 drivers/power/supply/power_supply_core.c struct power_supply_battery_ocv_table *table; table 764 drivers/power/supply/power_supply_core.c table = power_supply_find_ocv2cap_table(info, temp, &table_len); table 765 drivers/power/supply/power_supply_core.c if (!table) table 768 drivers/power/supply/power_supply_core.c return power_supply_ocv2cap_simple(table, table_len, ocv); table 896 drivers/power/supply/sc27xx_fuel_gauge.c struct power_supply_battery_ocv_table *table; table 914 drivers/power/supply/sc27xx_fuel_gauge.c table = power_supply_find_ocv2cap_table(&info, 20, &data->table_len); table 915 drivers/power/supply/sc27xx_fuel_gauge.c if (!table) table 918 drivers/power/supply/sc27xx_fuel_gauge.c data->cap_table = devm_kmemdup(data->dev, table, table 919 drivers/power/supply/sc27xx_fuel_gauge.c data->table_len * sizeof(*table), table 138 drivers/ps3/ps3av.c u32 *table; table 141 drivers/ps3/ps3av.c table = cmd_table; table 142 drivers/ps3/ps3av.c for (i = 0;; table++, i++) { table 143 drivers/ps3/ps3av.c if ((*table & mask) == (cid & mask)) table 145 drivers/ps3/ps3av.c if (*table == 0) table 148 drivers/ps3/ps3av.c return table; table 153 drivers/ps3/ps3av.c u32 *table; table 156 drivers/ps3/ps3av.c table = ps3av_search_cmd_table(hdr->cid, PS3AV_EVENT_CMD_MASK); table 157 drivers/ps3/ps3av.c if (table) table 291 drivers/ps3/ps3av.c u32 *table; table 297 drivers/ps3/ps3av.c table = ps3av_search_cmd_table(cid, PS3AV_CID_MASK); table 298 drivers/ps3/ps3av.c BUG_ON(!table); table 826 drivers/pwm/core.c void pwm_add_table(struct pwm_lookup *table, size_t num) table 831 drivers/pwm/core.c list_add_tail(&table->list, &pwm_lookup_list); table 832 drivers/pwm/core.c table++; table 843 drivers/pwm/core.c void pwm_remove_table(struct pwm_lookup *table, size_t num) table 848 drivers/pwm/core.c list_del(&table->list); table 849 drivers/pwm/core.c table++; table 517 drivers/pwm/pwm-meson.c channel->mux.table = NULL; table 42 drivers/rapidio/rio-scan.c unsigned long table[0]; table 62 drivers/rapidio/rio-scan.c destid = find_first_zero_bit(idtab->table, idtab->max); table 65 drivers/rapidio/rio-scan.c set_bit(destid, idtab->table); table 89 drivers/rapidio/rio-scan.c oldbit = test_and_set_bit(destid, idtab->table); table 107 drivers/rapidio/rio-scan.c clear_bit(destid, idtab->table); table 121 drivers/rapidio/rio-scan.c destid = find_first_bit(idtab->table, idtab->max); table 141 drivers/rapidio/rio-scan.c destid = 
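power_supply_ocv2cap_simple in the entries above converts an open-circuit voltage to a capacity percentage by scanning a descending OCV table and interpolating linearly between the two bracketing rows, clamping at either end. A self-contained sketch of that calculation with a made-up table:

#include <stdio.h>

struct ocv_table { int ocv; int capacity; };   /* uV, percent; descending OCV */

static int ocv2cap_simple(const struct ocv_table *table, int table_len, int ocv)
{
	int i, tmp;

	for (i = 0; i < table_len; i++)
		if (ocv > table[i].ocv)
			break;

	if (i > 0 && i < table_len) {
		/* linear interpolation between rows i-1 and i */
		tmp = (table[i - 1].capacity - table[i].capacity) *
		      (ocv - table[i].ocv);
		tmp /= table[i - 1].ocv - table[i].ocv;
		return tmp + table[i].capacity;
	} else if (i == 0) {
		return table[0].capacity;              /* above the table: full */
	}
	return table[table_len - 1].capacity;          /* below the table: empty */
}

int main(void)
{
	static const struct ocv_table tbl[] = {
		{ 4200000, 100 }, { 4000000, 80 }, { 3800000, 50 },
		{ 3600000, 20 },  { 3400000, 0 },
	};

	printf("3.9 V -> %d%%\n", ocv2cap_simple(tbl, 5, 3900000));
	return 0;
}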
find_next_bit(idtab->table, idtab->max, from); table 1492 drivers/rapidio/rio.c u16 table, u16 route_destid, u8 route_port) table 1494 drivers/rapidio/rio.c if (table == RIO_GLOBAL_TABLE) { table 1520 drivers/rapidio/rio.c u16 table, u16 route_destid, u8 *route_port) table 1524 drivers/rapidio/rio.c if (table == RIO_GLOBAL_TABLE) { table 1546 drivers/rapidio/rio.c u16 table) table 1552 drivers/rapidio/rio.c if (table == RIO_GLOBAL_TABLE) { table 1680 drivers/rapidio/rio.c u16 table, u16 route_destid, u8 route_port, int lock) table 1696 drivers/rapidio/rio.c rdev->hopcount, table, table 1700 drivers/rapidio/rio.c rdev->hopcount, table, route_destid, table 1732 drivers/rapidio/rio.c int rio_route_get_entry(struct rio_dev *rdev, u16 table, table 1749 drivers/rapidio/rio.c rdev->hopcount, table, table 1753 drivers/rapidio/rio.c rdev->hopcount, table, route_destid, table 1781 drivers/rapidio/rio.c int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock) table 1797 drivers/rapidio/rio.c rdev->hopcount, table); table 1800 drivers/rapidio/rio.c rdev->hopcount, table); table 30 drivers/rapidio/rio.h u16 table, u16 route_destid, u8 route_port, int lock); table 31 drivers/rapidio/rio.h extern int rio_route_get_entry(struct rio_dev *rdev, u16 table, table 33 drivers/rapidio/rio.h extern int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock); table 88 drivers/rapidio/switches/idt_gen2.c u16 table, u16 route_destid, u8 route_port) table 93 drivers/rapidio/switches/idt_gen2.c if (table == RIO_GLOBAL_TABLE) table 94 drivers/rapidio/switches/idt_gen2.c table = 0; table 96 drivers/rapidio/switches/idt_gen2.c table++; table 102 drivers/rapidio/switches/idt_gen2.c LOCAL_RTE_CONF_DESTID_SEL, table); table 121 drivers/rapidio/switches/idt_gen2.c u16 table, u16 route_destid, u8 *route_port) table 128 drivers/rapidio/switches/idt_gen2.c if (table == RIO_GLOBAL_TABLE) table 129 drivers/rapidio/switches/idt_gen2.c table = 0; table 131 drivers/rapidio/switches/idt_gen2.c table++; table 134 drivers/rapidio/switches/idt_gen2.c LOCAL_RTE_CONF_DESTID_SEL, table); table 153 drivers/rapidio/switches/idt_gen2.c u16 table) table 160 drivers/rapidio/switches/idt_gen2.c if (table == RIO_GLOBAL_TABLE) table 161 drivers/rapidio/switches/idt_gen2.c table = 0; table 163 drivers/rapidio/switches/idt_gen2.c table++; table 166 drivers/rapidio/switches/idt_gen2.c LOCAL_RTE_CONF_DESTID_SEL, table); table 38 drivers/rapidio/switches/idt_gen3.c u16 table, u16 route_destid, u8 route_port) table 45 drivers/rapidio/switches/idt_gen3.c __func__, table, route_destid, entry); table 53 drivers/rapidio/switches/idt_gen3.c if (table == RIO_GLOBAL_TABLE) { table 69 drivers/rapidio/switches/idt_gen3.c if (table >= RIO_GET_TOTAL_PORTS(rval)) table 73 drivers/rapidio/switches/idt_gen3.c RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), table 80 drivers/rapidio/switches/idt_gen3.c u16 table, u16 route_destid, u8 *route_port) table 98 drivers/rapidio/switches/idt_gen3.c if (table == RIO_GLOBAL_TABLE) table 99 drivers/rapidio/switches/idt_gen3.c table = RIO_GET_PORT_NUM(rval); table 100 drivers/rapidio/switches/idt_gen3.c else if (table >= RIO_GET_TOTAL_PORTS(rval)) table 104 drivers/rapidio/switches/idt_gen3.c RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid), table 119 drivers/rapidio/switches/idt_gen3.c u16 table) table 125 drivers/rapidio/switches/idt_gen3.c if (table == RIO_GLOBAL_TABLE) { table 142 drivers/rapidio/switches/idt_gen3.c if (table >= RIO_GET_TOTAL_PORTS(rval)) table 147 drivers/rapidio/switches/idt_gen3.c 
RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i), table 22 drivers/rapidio/switches/idtcps.c u16 table, u16 route_destid, u8 route_port) table 29 drivers/rapidio/switches/idtcps.c if (table == RIO_GLOBAL_TABLE) { table 46 drivers/rapidio/switches/idtcps.c u16 table, u16 route_destid, u8 *route_port) table 50 drivers/rapidio/switches/idtcps.c if (table == RIO_GLOBAL_TABLE) { table 69 drivers/rapidio/switches/idtcps.c u16 table) table 73 drivers/rapidio/switches/idtcps.c if (table == RIO_GLOBAL_TABLE) { table 34 drivers/rapidio/switches/tsi568.c u16 table, u16 route_destid, u8 route_port) table 36 drivers/rapidio/switches/tsi568.c if (table == RIO_GLOBAL_TABLE) { table 43 drivers/rapidio/switches/tsi568.c SPP_ROUTE_CFG_DESTID(table), table 46 drivers/rapidio/switches/tsi568.c SPP_ROUTE_CFG_PORT(table), route_port); table 56 drivers/rapidio/switches/tsi568.c u16 table, u16 route_destid, u8 *route_port) table 61 drivers/rapidio/switches/tsi568.c if (table == RIO_GLOBAL_TABLE) { table 68 drivers/rapidio/switches/tsi568.c SPP_ROUTE_CFG_DESTID(table), table 71 drivers/rapidio/switches/tsi568.c SPP_ROUTE_CFG_PORT(table), &result); table 83 drivers/rapidio/switches/tsi568.c u16 table) table 90 drivers/rapidio/switches/tsi568.c if (table == RIO_GLOBAL_TABLE) { table 99 drivers/rapidio/switches/tsi568.c SPP_ROUTE_CFG_DESTID(table), table 103 drivers/rapidio/switches/tsi568.c SPP_ROUTE_CFG_PORT(table), table 43 drivers/rapidio/switches/tsi57x.c u16 table, u16 route_destid, u8 route_port) table 45 drivers/rapidio/switches/tsi57x.c if (table == RIO_GLOBAL_TABLE) { table 52 drivers/rapidio/switches/tsi57x.c SPP_ROUTE_CFG_DESTID(table), route_destid); table 54 drivers/rapidio/switches/tsi57x.c SPP_ROUTE_CFG_PORT(table), route_port); table 64 drivers/rapidio/switches/tsi57x.c u16 table, u16 route_destid, u8 *route_port) table 69 drivers/rapidio/switches/tsi57x.c if (table == RIO_GLOBAL_TABLE) { table 74 drivers/rapidio/switches/tsi57x.c table = (result & RIO_SWP_INFO_PORT_NUM_MASK); table 78 drivers/rapidio/switches/tsi57x.c SPP_ROUTE_CFG_DESTID(table), route_destid); table 80 drivers/rapidio/switches/tsi57x.c SPP_ROUTE_CFG_PORT(table), &result); table 91 drivers/rapidio/switches/tsi57x.c u16 table) table 98 drivers/rapidio/switches/tsi57x.c if (table == RIO_GLOBAL_TABLE) { table 107 drivers/rapidio/switches/tsi57x.c SPP_ROUTE_CFG_DESTID(table), 0x80000000); table 110 drivers/rapidio/switches/tsi57x.c SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE); table 260 drivers/regulator/ltc3589.c LTC3589_REG(LDO4, ldo4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2, table 81 drivers/regulator/tps6507x-regulator.c const unsigned int *table; table 91 drivers/regulator/tps6507x-regulator.c .table = VDCDCx_VSEL_table, table 96 drivers/regulator/tps6507x-regulator.c .table = VDCDCx_VSEL_table, table 101 drivers/regulator/tps6507x-regulator.c .table = VDCDCx_VSEL_table, table 106 drivers/regulator/tps6507x-regulator.c .table = LDO1_VSEL_table, table 111 drivers/regulator/tps6507x-regulator.c .table = LDO2_VSEL_table, table 421 drivers/regulator/tps6507x-regulator.c tps->desc[i].volt_table = info->table; table 336 drivers/regulator/tps6586x-regulator.c struct tps6586x_regulator *table = NULL; table 343 drivers/regulator/tps6586x-regulator.c table = tps658623_regulator; table 348 drivers/regulator/tps6586x-regulator.c table = tps658640_regulator; table 352 drivers/regulator/tps6586x-regulator.c table = tps658643_regulator; table 358 drivers/regulator/tps6586x-regulator.c if (table) { table 360 drivers/regulator/tps6586x-regulator.c ri = 
&table[i]; table 42 drivers/regulator/twl-regulator.c const u16 *table; table 376 drivers/regulator/twl-regulator.c int mV = info->table[index]; table 468 drivers/regulator/twl-regulator.c .table = label##_VSEL_table, \ table 48 drivers/remoteproc/remoteproc_core.c struct resource_table *table, int len); table 192 drivers/remoteproc/remoteproc_debugfs.c struct resource_table *table = rproc->table_ptr; table 199 drivers/remoteproc/remoteproc_debugfs.c if (!table) { table 204 drivers/remoteproc/remoteproc_debugfs.c for (i = 0; i < table->num; i++) { table 205 drivers/remoteproc/remoteproc_debugfs.c int offset = table->offset[i]; table 206 drivers/remoteproc/remoteproc_debugfs.c struct fw_rsc_hdr *hdr = (void *)table + offset; table 209 drivers/remoteproc/remoteproc_elf_loader.c struct resource_table *table = NULL; table 223 drivers/remoteproc/remoteproc_elf_loader.c table = (struct resource_table *)(elf_data + offset); table 238 drivers/remoteproc/remoteproc_elf_loader.c if (table->ver != 1) { table 239 drivers/remoteproc/remoteproc_elf_loader.c dev_err(dev, "unsupported fw ver: %d\n", table->ver); table 244 drivers/remoteproc/remoteproc_elf_loader.c if (table->reserved[0] || table->reserved[1]) { table 250 drivers/remoteproc/remoteproc_elf_loader.c if (struct_size(table, offset, table->num) > size) { table 276 drivers/remoteproc/remoteproc_elf_loader.c struct resource_table *table = NULL; table 286 drivers/remoteproc/remoteproc_elf_loader.c table = (struct resource_table *)(elf_data + shdr->sh_offset); table 295 drivers/remoteproc/remoteproc_elf_loader.c rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL); table 150 drivers/scsi/3w-9xxx.c static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); table 1969 drivers/scsi/3w-9xxx.c static char *twa_string_lookup(twa_message_type *table, unsigned int code) table 1973 drivers/scsi/3w-9xxx.c for (index = 0; ((code != table[index].code) && table 1974 drivers/scsi/3w-9xxx.c (table[index].text != (char *)0)); index++); table 1975 drivers/scsi/3w-9xxx.c return(table[index].text); table 1462 drivers/scsi/aic7xxx/aic79xx.h int ahd_print_register(const ahd_reg_parse_entry_t *table, table 9629 drivers/scsi/aic7xxx/aic79xx_core.c ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries, table 9641 drivers/scsi/aic7xxx/aic79xx_core.c if (table == NULL) { table 9651 drivers/scsi/aic7xxx/aic79xx_core.c if (((value & table[entry].mask) table 9652 drivers/scsi/aic7xxx/aic79xx_core.c != table[entry].value) table 9653 drivers/scsi/aic7xxx/aic79xx_core.c || ((printed_mask & table[entry].mask) table 9654 drivers/scsi/aic7xxx/aic79xx_core.c == table[entry].mask)) table 9659 drivers/scsi/aic7xxx/aic79xx_core.c table[entry].name); table 9660 drivers/scsi/aic7xxx/aic79xx_core.c printed_mask |= table[entry].mask; table 1268 drivers/scsi/aic7xxx/aic7xxx.h int ahc_print_register(const ahc_reg_parse_entry_t *table, table 7087 drivers/scsi/aic7xxx/aic7xxx_core.c ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries, table 7099 drivers/scsi/aic7xxx/aic7xxx_core.c if (table == NULL) { table 7109 drivers/scsi/aic7xxx/aic7xxx_core.c if (((value & table[entry].mask) table 7110 drivers/scsi/aic7xxx/aic7xxx_core.c != table[entry].value) table 7111 drivers/scsi/aic7xxx/aic7xxx_core.c || ((printed_mask & table[entry].mask) table 7112 drivers/scsi/aic7xxx/aic7xxx_core.c == table[entry].mask)) table 7117 drivers/scsi/aic7xxx/aic7xxx_core.c table[entry].name); table 7118 drivers/scsi/aic7xxx/aic7xxx_core.c printed_mask |= 
table[entry].mask; table 1539 drivers/scsi/be2iscsi/be_mgmt.c req->table[i].icd = inv_tbl[i].icd; table 1540 drivers/scsi/be2iscsi/be_mgmt.c req->table[i].cid = inv_tbl[i].cid; table 49 drivers/scsi/be2iscsi/be_mgmt.h struct invldt_cmd_tbl table[BE_INVLDT_CMD_TBL_SZ]; table 1222 drivers/scsi/cxgbi/libcxgbi.c *sgl = sdb->table.sgl; table 1223 drivers/scsi/cxgbi/libcxgbi.c *sgcnt = sdb->table.nents; table 1987 drivers/scsi/cxgbi/libcxgbi.c sdb->table.sgl, sdb->table.nents, table 1991 drivers/scsi/cxgbi/libcxgbi.c sdb->table.nents, tdata->offset, sdb->length); table 1998 drivers/scsi/cxgbi/libcxgbi.c sdb->table.nents, tdata->offset, tdata->count); table 3127 drivers/scsi/dpt_i2o.c u32 *table = (u32*)sys_tbl; table 3131 drivers/scsi/dpt_i2o.c count, table[count]); table 209 drivers/scsi/fcoe/fcoe_sysfs.c #define fcoe_enum_name_search(title, table_type, table) \ table 212 drivers/scsi/fcoe/fcoe_sysfs.c if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \ table 214 drivers/scsi/fcoe/fcoe_sysfs.c return table[table_key]; \ table 82 drivers/scsi/isci/phy.c struct isci_phy *table = iphy - iphy->phy_index; table 83 drivers/scsi/isci/phy.c struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]); table 76 drivers/scsi/isci/port.c struct isci_port *table; table 82 drivers/scsi/isci/port.c table = iport - i; table 83 drivers/scsi/isci/port.c ihost = container_of(table, typeof(*ihost), ports[0]); table 1723 drivers/scsi/isci/registers.h u32 table[0xE0]; table 1728 drivers/scsi/isci/registers.h u32 table[256]; table 1738 drivers/scsi/isci/registers.h u32 table[2048]; table 522 drivers/scsi/iscsi_tcp.c err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl, table 523 drivers/scsi/iscsi_tcp.c sdb->table.nents, offset, table 220 drivers/scsi/libfc/fc_exch.c static inline const char *fc_exch_name_lookup(unsigned int op, char **table, table 226 drivers/scsi/libfc/fc_exch.c name = table[op]; table 713 drivers/scsi/libiscsi_tcp.c sdb->table.sgl, table 714 drivers/scsi/libiscsi_tcp.c sdb->table.nents, table 356 drivers/scsi/myrb.c struct myrb_error_entry *table = cb->err_table; table 366 drivers/scsi/myrb.c new = table + err_table_offset; table 65 drivers/scsi/nsp32_debug.c const char **table = commands[ group(opcode) ]; table 67 drivers/scsi/nsp32_debug.c switch ((unsigned long) table) { table 78 drivers/scsi/nsp32_debug.c if (table[opcode & 0x1f] != unknown) table 79 drivers/scsi/nsp32_debug.c printk("%s[%02x] ", table[opcode & 0x1f], opcode); table 67 drivers/scsi/pcmcia/nsp_debug.c const char **table = commands[ group(opcode) ]; table 69 drivers/scsi/pcmcia/nsp_debug.c switch ((unsigned long) table) { table 80 drivers/scsi/pcmcia/nsp_debug.c if (table[opcode & 0x1f] != unknown) table 81 drivers/scsi/pcmcia/nsp_debug.c printk("%s[%02x] ", table[opcode & 0x1f], opcode); table 209 drivers/scsi/qedi/qedi.h unsigned long *table; table 529 drivers/scsi/qedi/qedi_main.c id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL); table 530 drivers/scsi/qedi/qedi_main.c if (!id_tbl->table) table 538 drivers/scsi/qedi/qedi_main.c kfree(id_tbl->table); table 539 drivers/scsi/qedi/qedi_main.c id_tbl->table = NULL; table 551 drivers/scsi/qedi/qedi_main.c if (!test_bit(id, id_tbl->table)) { table 552 drivers/scsi/qedi/qedi_main.c set_bit(id, id_tbl->table); table 564 drivers/scsi/qedi/qedi_main.c id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); table 568 drivers/scsi/qedi/qedi_main.c id = find_first_zero_bit(id_tbl->table, id_tbl->next); table 575 drivers/scsi/qedi/qedi_main.c 
set_bit(id, id_tbl->table); table 594 drivers/scsi/qedi/qedi_main.c clear_bit(id, id_tbl->table); table 1013 drivers/scsi/scsi_debug.c act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, table 1037 drivers/scsi/scsi_debug.c act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents, table 2503 drivers/scsi/scsi_debug.c ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, table 2510 drivers/scsi/scsi_debug.c ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, table 988 drivers/scsi/scsi_error.c scmd->sdb.table.sgl = &ses->sense_sgl; table 990 drivers/scsi/scsi_error.c scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1; table 556 drivers/scsi/scsi_lib.c if (cmd->sdb.table.nents) table 557 drivers/scsi/scsi_lib.c sg_free_table_chained(&cmd->sdb.table, table 560 drivers/scsi/scsi_lib.c sg_free_table_chained(&cmd->prot_sdb->table, table 993 drivers/scsi/scsi_lib.c if (unlikely(sg_alloc_table_chained(&sdb->table, table 994 drivers/scsi/scsi_lib.c blk_rq_nr_phys_segments(req), sdb->table.sgl, table 1002 drivers/scsi/scsi_lib.c count = blk_rq_map_sg(req->q, req, sdb->table.sgl); table 1003 drivers/scsi/scsi_lib.c BUG_ON(count > sdb->table.nents); table 1004 drivers/scsi/scsi_lib.c sdb->table.nents = count; table 1048 drivers/scsi/scsi_lib.c if (sg_alloc_table_chained(&prot_sdb->table, ivecs, table 1049 drivers/scsi/scsi_lib.c prot_sdb->table.sgl, table 1056 drivers/scsi/scsi_lib.c prot_sdb->table.sgl); table 1061 drivers/scsi/scsi_lib.c cmd->prot_sdb->table.nents = count; table 1593 drivers/scsi/scsi_lib.c cmd->sdb.table.sgl = sg; table 1598 drivers/scsi/scsi_lib.c cmd->prot_sdb->table.sgl = table 64 drivers/scsi/scsi_transport_fc.c #define fc_enum_name_search(title, table_type, table) \ table 70 drivers/scsi/scsi_transport_fc.c for (i = 0; i < ARRAY_SIZE(table); i++) { \ table 71 drivers/scsi/scsi_transport_fc.c if (table[i].value == table_key) { \ table 72 drivers/scsi/scsi_transport_fc.c name = table[i].name; \ table 79 drivers/scsi/scsi_transport_fc.c #define fc_enum_name_match(title, table_type, table) \ table 85 drivers/scsi/scsi_transport_fc.c for (i = 0; i < ARRAY_SIZE(table); i++) { \ table 86 drivers/scsi/scsi_transport_fc.c if (strncmp(table_key, table[i].name, \ table 87 drivers/scsi/scsi_transport_fc.c table[i].matchlen) == 0) { \ table 88 drivers/scsi/scsi_transport_fc.c *value = table[i].value; \ table 203 drivers/scsi/scsi_transport_fc.c #define fc_bitfield_name_search(title, table) \ table 211 drivers/scsi/scsi_transport_fc.c for (i = 0; i < ARRAY_SIZE(table); i++) { \ table 212 drivers/scsi/scsi_transport_fc.c if (table[i].value & table_key) { \ table 214 drivers/scsi/scsi_transport_fc.c prefix, table[i].name); \ table 67 drivers/scsi/scsi_transport_sas.c #define sas_bitfield_name_match(title, table) \ table 75 drivers/scsi/scsi_transport_sas.c for (i = 0; i < ARRAY_SIZE(table); i++) { \ table 76 drivers/scsi/scsi_transport_sas.c if (table[i].value & table_key) { \ table 78 drivers/scsi/scsi_transport_sas.c prefix, table[i].name); \ table 86 drivers/scsi/scsi_transport_sas.c #define sas_bitfield_name_set(title, table) \ table 93 drivers/scsi/scsi_transport_sas.c for (i = 0; i < ARRAY_SIZE(table); i++) { \ table 94 drivers/scsi/scsi_transport_sas.c len = strlen(table[i].name); \ table 95 drivers/scsi/scsi_transport_sas.c if (strncmp(buf, table[i].name, len) == 0 && \ table 97 drivers/scsi/scsi_transport_sas.c *table_key = table[i].value; \ table 104 drivers/scsi/scsi_transport_sas.c #define sas_bitfield_name_search(title, table) \ table 111 
drivers/scsi/scsi_transport_sas.c for (i = 0; i < ARRAY_SIZE(table); i++) { \ table 112 drivers/scsi/scsi_transport_sas.c if (table[i].value == table_key) { \ table 114 drivers/scsi/scsi_transport_sas.c table[i].name); \ table 431 drivers/scsi/stex.c struct st_sgitem *table; table 445 drivers/scsi/stex.c table = (struct st_sgitem *)(dst + 1); table 447 drivers/scsi/stex.c table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); table 448 drivers/scsi/stex.c table[i].addr = cpu_to_le64(sg_dma_address(sg)); table 449 drivers/scsi/stex.c table[i].ctrl = SG_CF_64B | SG_CF_HOST; table 451 drivers/scsi/stex.c table[--i].ctrl |= SG_CF_EOT; table 463 drivers/scsi/stex.c struct st_ss_sgitem *table; table 477 drivers/scsi/stex.c table = (struct st_ss_sgitem *)(dst + 1); table 479 drivers/scsi/stex.c table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); table 480 drivers/scsi/stex.c table[i].addr = table 482 drivers/scsi/stex.c table[i].addr_hi = table 418 drivers/scsi/virtio_scsi.c out = &sc->sdb.table; table 420 drivers/scsi/virtio_scsi.c in = &sc->sdb.table; table 142 drivers/sfi/sfi_acpi.c static void sfi_acpi_put_table(struct acpi_table_header *table) table 144 drivers/sfi/sfi_acpi.c sfi_put_table(acpi_to_sfi_th(table)); table 155 drivers/sfi/sfi_acpi.c struct acpi_table_header *table = NULL; table 166 drivers/sfi/sfi_acpi.c table = sfi_acpi_get_table(&key); table 167 drivers/sfi/sfi_acpi.c if (!table) table 170 drivers/sfi/sfi_acpi.c ret = handler(table); table 171 drivers/sfi/sfi_acpi.c sfi_acpi_put_table(table); table 78 drivers/sfi/sfi_core.c #define TABLE_ON_PAGE(page, table, size) (ON_SAME_PAGE(page, table) && \ table 79 drivers/sfi/sfi_core.c ON_SAME_PAGE(page, table + size)) table 134 drivers/sfi/sfi_core.c static int sfi_verify_table(struct sfi_table_header *table) table 138 drivers/sfi/sfi_core.c u8 *puchar = (u8 *)table; table 139 drivers/sfi/sfi_core.c u32 length = table->len; table 152 drivers/sfi/sfi_core.c table->csum, table->csum - checksum); table 284 drivers/sfi/sfi_core.c struct sfi_table_header *table = NULL; table 295 drivers/sfi/sfi_core.c table = sfi_get_table(&key); table 296 drivers/sfi/sfi_core.c if (!table) table 299 drivers/sfi/sfi_core.c ret = handler(table); table 300 drivers/sfi/sfi_core.c sfi_put_table(table); table 79 drivers/sfi/sfi_core.h extern void sfi_put_table(struct sfi_table_header *table); table 132 drivers/sh/clk/cpg.c struct clk_div_mult_table *table = clk_to_div_mult_table(clk); table 135 drivers/sh/clk/cpg.c clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, table 136 drivers/sh/clk/cpg.c table, clk->arch_flags ? 
&clk->arch_flags : NULL); table 242 drivers/sh/clk/cpg.c struct clk_div_table *table, struct sh_clk_ops *ops) table 246 drivers/sh/clk/cpg.c int nr_divs = table->div_mult_table->nr_divisors; table 262 drivers/sh/clk/cpg.c clkp->priv = table; table 296 drivers/sh/clk/cpg.c struct clk_div_mult_table *table = clk_to_div_mult_table(clk); table 321 drivers/sh/clk/cpg.c clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, table 322 drivers/sh/clk/cpg.c table, NULL); table 353 drivers/sh/clk/cpg.c struct clk_div_mult_table *table = clk_to_div_mult_table(clk); table 374 drivers/sh/clk/cpg.c clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, table 375 drivers/sh/clk/cpg.c table, &clk->arch_flags); table 390 drivers/sh/clk/cpg.c struct clk_div4_table *table) table 392 drivers/sh/clk/cpg.c return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops); table 396 drivers/sh/clk/cpg.c struct clk_div4_table *table) table 398 drivers/sh/clk/cpg.c return sh_clk_div_register_ops(clks, nr, table, table 403 drivers/sh/clk/cpg.c struct clk_div4_table *table) table 405 drivers/sh/clk/cpg.c return sh_clk_div_register_ops(clks, nr, table, table 140 drivers/staging/android/ion/ion.c static struct sg_table *dup_sg_table(struct sg_table *table) table 150 drivers/staging/android/ion/ion.c ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL); table 157 drivers/staging/android/ion/ion.c for_each_sg(table->sgl, sg, table->nents, i) { table 166 drivers/staging/android/ion/ion.c static void free_duped_table(struct sg_table *table) table 168 drivers/staging/android/ion/ion.c sg_free_table(table); table 169 drivers/staging/android/ion/ion.c kfree(table); table 174 drivers/staging/android/ion/ion.c struct sg_table *table; table 182 drivers/staging/android/ion/ion.c struct sg_table *table; table 189 drivers/staging/android/ion/ion.c table = dup_sg_table(buffer->sg_table); table 190 drivers/staging/android/ion/ion.c if (IS_ERR(table)) { table 195 drivers/staging/android/ion/ion.c a->table = table; table 217 drivers/staging/android/ion/ion.c free_duped_table(a->table); table 226 drivers/staging/android/ion/ion.c struct sg_table *table; table 228 drivers/staging/android/ion/ion.c table = a->table; table 230 drivers/staging/android/ion/ion.c if (!dma_map_sg(attachment->dev, table->sgl, table->nents, table 234 drivers/staging/android/ion/ion.c return table; table 238 drivers/staging/android/ion/ion.c struct sg_table *table, table 241 drivers/staging/android/ion/ion.c dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction); table 312 drivers/staging/android/ion/ion.c dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents, table 335 drivers/staging/android/ion/ion.c dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents, table 32 drivers/staging/android/ion/ion_cma_heap.c struct sg_table *table; table 62 drivers/staging/android/ion/ion_cma_heap.c table = kmalloc(sizeof(*table), GFP_KERNEL); table 63 drivers/staging/android/ion/ion_cma_heap.c if (!table) table 66 drivers/staging/android/ion/ion_cma_heap.c ret = sg_alloc_table(table, 1, GFP_KERNEL); table 70 drivers/staging/android/ion/ion_cma_heap.c sg_set_page(table->sgl, pages, size, 0); table 73 drivers/staging/android/ion/ion_cma_heap.c buffer->sg_table = table; table 77 drivers/staging/android/ion/ion_cma_heap.c kfree(table); table 27 drivers/staging/android/ion/ion_heap.c struct sg_table *table = buffer->sg_table; table 41 drivers/staging/android/ion/ion_heap.c for_each_sg(table->sgl, sg, table->nents, i) { table 67 
drivers/staging/android/ion/ion_heap.c struct sg_table *table = buffer->sg_table; table 74 drivers/staging/android/ion/ion_heap.c for_each_sg(table->sgl, sg, table->nents, i) { table 137 drivers/staging/android/ion/ion_heap.c struct sg_table *table = buffer->sg_table; table 145 drivers/staging/android/ion/ion_heap.c return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); table 105 drivers/staging/android/ion/ion_system_heap.c struct sg_table *table; table 127 drivers/staging/android/ion/ion_system_heap.c table = kmalloc(sizeof(*table), GFP_KERNEL); table 128 drivers/staging/android/ion/ion_system_heap.c if (!table) table 131 drivers/staging/android/ion/ion_system_heap.c if (sg_alloc_table(table, i, GFP_KERNEL)) table 134 drivers/staging/android/ion/ion_system_heap.c sg = table->sgl; table 141 drivers/staging/android/ion/ion_system_heap.c buffer->sg_table = table; table 145 drivers/staging/android/ion/ion_system_heap.c kfree(table); table 157 drivers/staging/android/ion/ion_system_heap.c struct sg_table *table = buffer->sg_table; table 165 drivers/staging/android/ion/ion_system_heap.c for_each_sg(table->sgl, sg, table->nents, i) table 167 drivers/staging/android/ion/ion_system_heap.c sg_free_table(table); table 168 drivers/staging/android/ion/ion_system_heap.c kfree(table); table 291 drivers/staging/android/ion/ion_system_heap.c struct sg_table *table; table 305 drivers/staging/android/ion/ion_system_heap.c table = kmalloc(sizeof(*table), GFP_KERNEL); table 306 drivers/staging/android/ion/ion_system_heap.c if (!table) { table 311 drivers/staging/android/ion/ion_system_heap.c ret = sg_alloc_table(table, 1, GFP_KERNEL); table 315 drivers/staging/android/ion/ion_system_heap.c sg_set_page(table->sgl, page, len, 0); table 317 drivers/staging/android/ion/ion_system_heap.c buffer->sg_table = table; table 322 drivers/staging/android/ion/ion_system_heap.c kfree(table); table 332 drivers/staging/android/ion/ion_system_heap.c struct sg_table *table = buffer->sg_table; table 333 drivers/staging/android/ion/ion_system_heap.c struct page *page = sg_page(table->sgl); table 339 drivers/staging/android/ion/ion_system_heap.c sg_free_table(table); table 340 drivers/staging/android/ion/ion_system_heap.c kfree(table); table 50 drivers/staging/comedi/drivers/ni_routes.c #define RVi(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)]) table 27 drivers/staging/comedi/drivers/tests/ni_routes_test.c #define RVi(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)]) table 217 drivers/staging/comedi/drivers/tests/ni_routes_test.c const u8 *table, *oldtable; table 222 drivers/staging/comedi/drivers/tests/ni_routes_test.c table = private.routing_tables.route_values; table 246 drivers/staging/comedi/drivers/tests/ni_routes_test.c RVi(table, B(PXI_Star), B(NI_AI_SampleClock)) == V(17) && table 247 drivers/staging/comedi/drivers/tests/ni_routes_test.c RVi(table, B(NI_10MHzRefClock), B(TRIGGER_LINE(0))) == 0 && table 248 drivers/staging/comedi/drivers/tests/ni_routes_test.c RVi(table, B(NI_AI_ConvertClock), B(NI_PFI(0))) == 0 && table 249 drivers/staging/comedi/drivers/tests/ni_routes_test.c RVi(table, B(NI_AI_ConvertClock), B(NI_PFI(2))) == table 254 drivers/staging/comedi/drivers/tests/ni_routes_test.c oldtable = table; table 258 drivers/staging/comedi/drivers/tests/ni_routes_test.c table = private.routing_tables.route_values; table 262 drivers/staging/comedi/drivers/tests/ni_routes_test.c unittest(oldtable != table, "pci-6220 find other route_values table\n"); table 265 
drivers/staging/comedi/drivers/tests/ni_routes_test.c RVi(table, B(PXI_Star), B(NI_AI_SampleClock)) == V(20) && table 266 drivers/staging/comedi/drivers/tests/ni_routes_test.c RVi(table, B(NI_10MHzRefClock), B(TRIGGER_LINE(0))) == V(12) && table 267 drivers/staging/comedi/drivers/tests/ni_routes_test.c RVi(table, B(NI_AI_ConvertClock), B(NI_PFI(0))) == V(3) && table 268 drivers/staging/comedi/drivers/tests/ni_routes_test.c RVi(table, B(NI_AI_ConvertClock), B(NI_PFI(2))) == V(3), table 1571 drivers/staging/gasket/gasket_core.c const struct gasket_num_name *table) table 1575 drivers/staging/gasket/gasket_core.c while (table[i].snn_name) { table 1576 drivers/staging/gasket/gasket_core.c if (num == table[i].snn_num) table 1581 drivers/staging/gasket/gasket_core.c return table[i].snn_name; table 576 drivers/staging/gasket/gasket_core.h const struct gasket_num_name *table); table 479 drivers/staging/kpc2000/kpc2000_spi.c #define NEW_SPI_DEVICE_FROM_BOARD_INFO_TABLE(table) \ table 480 drivers/staging/kpc2000/kpc2000_spi.c for (i = 0 ; i < ARRAY_SIZE(table) ; i++) { \ table 481 drivers/staging/kpc2000/kpc2000_spi.c spi_new_device(master, &(table[i])); \ table 41 drivers/staging/media/tegra-vde/trace.h TP_PROTO(unsigned int table, unsigned int row, u32 value, u32 aux_addr), table 42 drivers/staging/media/tegra-vde/trace.h TP_ARGS(table, row, value, aux_addr), table 44 drivers/staging/media/tegra-vde/trace.h __field(unsigned int, table) table 50 drivers/staging/media/tegra-vde/trace.h __entry->table = table; table 56 drivers/staging/media/tegra-vde/trace.h __entry->table, __entry->row, __entry->value, table 216 drivers/staging/media/tegra-vde/vde.c unsigned int table, table 222 drivers/staging/media/tegra-vde/vde.c trace_vde_setup_iram_entry(table, row, value1, value2); table 224 drivers/staging/media/tegra-vde/vde.c iram_tables[0x20 * table + row * 2] = value1; table 225 drivers/staging/media/tegra-vde/vde.c iram_tables[0x20 * table + row * 2 + 1] = value2; table 51 drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c .table = { table 422 drivers/target/target_core_rd.c struct rd_dev_sg_table *table; table 442 drivers/target/target_core_rd.c table = rd_get_sg_table(dev, rd_page); table 443 drivers/target/target_core_rd.c if (!table) table 446 drivers/target/target_core_rd.c rd_sg = &table->sg_table[rd_page - table->page_start_offset]; table 505 drivers/target/target_core_rd.c if (rd_page <= table->page_end_offset) { table 510 drivers/target/target_core_rd.c table = rd_get_sg_table(dev, rd_page); table 511 drivers/target/target_core_rd.c if (!table) { table 517 drivers/target/target_core_rd.c rd_sg = table->sg_table; table 62 drivers/tee/tee_shm.c struct sg_table *table, table 98 drivers/thermal/clock_cooling.c struct cpufreq_frequency_table *pos, *table = ccdev->freq_table; table 103 drivers/thermal/clock_cooling.c if (!table) table 106 drivers/thermal/clock_cooling.c cpufreq_for_each_valid_entry(pos, table) { table 136 drivers/thermal/clock_cooling.c cpufreq_for_each_valid_entry(pos, table) { table 499 drivers/thermal/cpu_cooling.c static unsigned int find_next_max(struct cpufreq_frequency_table *table, table 505 drivers/thermal/cpu_cooling.c cpufreq_for_each_valid_entry(pos, table) { table 113 drivers/thermal/rockchip_thermal.c int (*get_temp)(const struct chip_tsadc_table *table, table 115 drivers/thermal/rockchip_thermal.c int (*set_alarm_temp)(const struct chip_tsadc_table *table, table 117 drivers/thermal/rockchip_thermal.c int (*set_tshut_temp)(const struct chip_tsadc_table *table, table 122 
drivers/thermal/rockchip_thermal.c struct chip_tsadc_table table; table 474 drivers/thermal/rockchip_thermal.c static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table, table 480 drivers/thermal/rockchip_thermal.c u32 error = table->data_mask; table 483 drivers/thermal/rockchip_thermal.c high = (table->length - 1) - 1; /* ignore the last check for table */ table 487 drivers/thermal/rockchip_thermal.c if (temp < table->id[low].temp || temp > table->id[high].temp) table 491 drivers/thermal/rockchip_thermal.c if (temp == table->id[mid].temp) table 492 drivers/thermal/rockchip_thermal.c return table->id[mid].code; table 493 drivers/thermal/rockchip_thermal.c else if (temp < table->id[mid].temp) table 506 drivers/thermal/rockchip_thermal.c num = abs(table->id[mid + 1].code - table->id[mid].code); table 507 drivers/thermal/rockchip_thermal.c num *= temp - table->id[mid].temp; table 508 drivers/thermal/rockchip_thermal.c denom = table->id[mid + 1].temp - table->id[mid].temp; table 510 drivers/thermal/rockchip_thermal.c switch (table->mode) { table 512 drivers/thermal/rockchip_thermal.c return table->id[mid].code - (num / denom); table 514 drivers/thermal/rockchip_thermal.c return table->id[mid].code + (num / denom); table 516 drivers/thermal/rockchip_thermal.c pr_err("%s: unknown table mode: %d\n", __func__, table->mode); table 526 drivers/thermal/rockchip_thermal.c static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table, table 530 drivers/thermal/rockchip_thermal.c unsigned int high = table->length - 1; table 535 drivers/thermal/rockchip_thermal.c WARN_ON(table->length < 2); table 537 drivers/thermal/rockchip_thermal.c switch (table->mode) { table 539 drivers/thermal/rockchip_thermal.c code &= table->data_mask; table 540 drivers/thermal/rockchip_thermal.c if (code <= table->id[high].code) table 544 drivers/thermal/rockchip_thermal.c if (code >= table->id[mid].code && table 545 drivers/thermal/rockchip_thermal.c code < table->id[mid - 1].code) table 547 drivers/thermal/rockchip_thermal.c else if (code < table->id[mid].code) table 556 drivers/thermal/rockchip_thermal.c code &= table->data_mask; table 557 drivers/thermal/rockchip_thermal.c if (code < table->id[low].code) table 561 drivers/thermal/rockchip_thermal.c if (code <= table->id[mid].code && table 562 drivers/thermal/rockchip_thermal.c code > table->id[mid - 1].code) table 564 drivers/thermal/rockchip_thermal.c else if (code > table->id[mid].code) table 573 drivers/thermal/rockchip_thermal.c pr_err("%s: unknown table mode: %d\n", __func__, table->mode); table 583 drivers/thermal/rockchip_thermal.c num = table->id[mid].temp - table->id[mid - 1].temp; table 584 drivers/thermal/rockchip_thermal.c num *= abs(table->id[mid - 1].code - code); table 585 drivers/thermal/rockchip_thermal.c denom = abs(table->id[mid - 1].code - table->id[mid].code); table 586 drivers/thermal/rockchip_thermal.c *temp = table->id[mid - 1].temp + (num / denom); table 744 drivers/thermal/rockchip_thermal.c static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table, table 751 drivers/thermal/rockchip_thermal.c return rk_tsadcv2_code_to_temp(table, val, temp); table 754 drivers/thermal/rockchip_thermal.c static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table, table 774 drivers/thermal/rockchip_thermal.c alarm_value = rk_tsadcv2_temp_to_code(table, temp); table 775 drivers/thermal/rockchip_thermal.c if (alarm_value == table->data_mask) table 778 drivers/thermal/rockchip_thermal.c writel_relaxed(alarm_value & 
table->data_mask, table 788 drivers/thermal/rockchip_thermal.c static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table, table 794 drivers/thermal/rockchip_thermal.c tshut_value = rk_tsadcv2_temp_to_code(table, temp); table 795 drivers/thermal/rockchip_thermal.c if (tshut_value == table->data_mask) table 840 drivers/thermal/rockchip_thermal.c .table = { table 864 drivers/thermal/rockchip_thermal.c .table = { table 888 drivers/thermal/rockchip_thermal.c .table = { table 913 drivers/thermal/rockchip_thermal.c .table = { table 936 drivers/thermal/rockchip_thermal.c .table = { table 961 drivers/thermal/rockchip_thermal.c .table = { table 986 drivers/thermal/rockchip_thermal.c .table = { table 1011 drivers/thermal/rockchip_thermal.c .table = { table 1089 drivers/thermal/rockchip_thermal.c return tsadc->set_alarm_temp(&tsadc->table, table 1100 drivers/thermal/rockchip_thermal.c retval = tsadc->get_temp(&tsadc->table, table 1187 drivers/thermal/rockchip_thermal.c error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs, table 1403 drivers/thermal/rockchip_thermal.c error = thermal->chip->set_tshut_temp(&thermal->chip->table, table 22 drivers/thermal/thermal_core.h #define THERMAL_TABLE_ENTRY(table, name) \ table 24 drivers/thermal/thermal_core.h __used __section(__##table##_thermal_table) = &name table 1039 drivers/tty/serial/msm_serial.c static const struct msm_baud_map table[] = { table 1058 drivers/tty/serial/msm_serial.c best = table; /* Default to smallest divider */ table 1062 drivers/tty/serial/msm_serial.c end = table + ARRAY_SIZE(table); table 1063 drivers/tty/serial/msm_serial.c entry = table; table 1089 drivers/tty/serial/msm_serial.c entry = table; table 80 drivers/usb/dwc3/dwc3-pci.c .table = { table 741 drivers/usb/gadget/udc/bdc/bdc_ep.c struct bd_table *table; table 821 drivers/usb/gadget/udc/bdc/bdc_ep.c table = ep->bd_list.bd_table_array[tbi]; table 822 drivers/usb/gadget/udc/bdc/bdc_ep.c next_bd_dma = table->dma + table 36 drivers/usb/gadget/usbstring.c usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf) table 45 drivers/usb/gadget/usbstring.c buf [2] = (u8) table->language; table 46 drivers/usb/gadget/usbstring.c buf [3] = (u8) (table->language >> 8); table 49 drivers/usb/gadget/usbstring.c for (s = table->strings; s && s->s; s++) table 554 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_set_palette(struct vc_data *c, const unsigned char *table) table 576 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_setreg(sisusb, SISCOLIDX, table[i])) table 769 drivers/usb/misc/sisusbvga/sisusb_init.c const unsigned char *table = NULL; table 780 drivers/usb/misc/sisusbvga/sisusb_init.c table = SiS_MDA_DAC; table 782 drivers/usb/misc/sisusbvga/sisusb_init.c table = SiS_CGA_DAC; table 784 drivers/usb/misc/sisusbvga/sisusb_init.c table = SiS_EGA_DAC; table 788 drivers/usb/misc/sisusbvga/sisusb_init.c table = SiS_VGA_DAC; table 799 drivers/usb/misc/sisusbvga/sisusb_init.c data = table[i]; table 813 drivers/usb/misc/sisusbvga/sisusb_init.c data = table[i] << sf; table 824 drivers/usb/misc/sisusbvga/sisusb_init.c table[di], table[bx], table 825 drivers/usb/misc/sisusbvga/sisusb_init.c table[si]); table 831 drivers/usb/misc/sisusbvga/sisusb_init.c table[di], table[si], table 832 drivers/usb/misc/sisusbvga/sisusb_init.c table[bx]); table 458 drivers/usb/storage/isd200.c srb->sdb.table.sgl = buff ? &info->sg : NULL; table 460 drivers/usb/storage/isd200.c srb->sdb.table.nents = buff ? 
1 : 0; table 461 drivers/usb/storage/uas.c urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0; table 462 drivers/usb/storage/uas.c urb->sg = sdb->table.sgl; table 316 drivers/vfio/pci/vfio_pci.c u32 table; table 319 drivers/vfio/pci/vfio_pci.c pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table); table 321 drivers/vfio/pci/vfio_pci.c vdev->msix_bar = table & PCI_MSIX_TABLE_BIR; table 322 drivers/vfio/pci/vfio_pci.c vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET; table 163 drivers/vhost/vhost.c poll = container_of(pt, struct vhost_poll, table); table 192 drivers/vhost/vhost.c init_poll_funcptr(&poll->table, vhost_poll_func); table 210 drivers/vhost/vhost.c mask = vfs_poll(file, &poll->table); table 29 drivers/vhost/vhost.h poll_table table; table 233 drivers/video/backlight/pwm_bl.c unsigned int *table; table 324 drivers/video/backlight/pwm_bl.c size = sizeof(*table) * num_levels; table 325 drivers/video/backlight/pwm_bl.c table = devm_kzalloc(dev, size, GFP_KERNEL); table 326 drivers/video/backlight/pwm_bl.c if (!table) table 336 drivers/video/backlight/pwm_bl.c table[levels_count] = value; table 341 drivers/video/backlight/pwm_bl.c table[levels_count] = data->levels[i]; table 345 drivers/video/backlight/pwm_bl.c table[levels_count] = data->levels[i]; table 353 drivers/video/backlight/pwm_bl.c data->levels = table; table 870 drivers/video/console/vgacon.c static void vga_set_palette(struct vc_data *vc, const unsigned char *table) table 876 drivers/video/console/vgacon.c vga_w(vgastate.vgabase, VGA_PEL_IW, table[i]); table 883 drivers/video/console/vgacon.c static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table) table 888 drivers/video/console/vgacon.c vga_set_palette(vc, table); table 198 drivers/video/fbdev/core/fbcon.c static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); table 2726 drivers/video/fbdev/core/fbcon.c static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table) table 2741 drivers/video/fbdev/core/fbcon.c k = table[i]; table 543 drivers/video/fbdev/matrox/matroxfb_base.c static const struct RGBT table[]= { table 620 drivers/video/fbdev/matrox/matroxfb_base.c for (rgbt = table; rgbt->bpp < bpp; rgbt++); table 2879 drivers/video/fbdev/sis/init.c const unsigned char *table = NULL; table 2884 drivers/video/fbdev/sis/init.c if(data == 0x00) table = SiS_MDA_DAC; table 2885 drivers/video/fbdev/sis/init.c else if(data == 0x08) table = SiS_CGA_DAC; table 2886 drivers/video/fbdev/sis/init.c else if(data == 0x10) table = SiS_EGA_DAC; table 2890 drivers/video/fbdev/sis/init.c table = SiS_VGA_DAC; table 2910 drivers/video/fbdev/sis/init.c data = table[i]; table 2922 drivers/video/fbdev/sis/init.c data = table[i] << sf; table 2931 drivers/video/fbdev/sis/init.c SiS_WriteDAC(SiS_Pr, DACData, sf, n, table[di], table[bx], table[si]); table 2936 drivers/video/fbdev/sis/init.c SiS_WriteDAC(SiS_Pr, DACData, sf, n, table[di], table[si], table[bx]); table 103 drivers/watchdog/max63xx_wdt.c max63xx_select_timeout(struct max63xx_timeout *table, int value) table 105 drivers/watchdog/max63xx_wdt.c while (table->twd) { table 106 drivers/watchdog/max63xx_wdt.c if (value <= table->twd) { table 107 drivers/watchdog/max63xx_wdt.c if (nodelay && table->tdelay == 0) table 108 drivers/watchdog/max63xx_wdt.c return table; table 111 drivers/watchdog/max63xx_wdt.c return table; table 114 drivers/watchdog/max63xx_wdt.c table++; table 205 drivers/watchdog/max63xx_wdt.c struct max63xx_timeout *table; table 212 
drivers/watchdog/max63xx_wdt.c table = (struct max63xx_timeout *)pdev->id_entry->driver_data; table 217 drivers/watchdog/max63xx_wdt.c wdt->timeout = max63xx_select_timeout(table, heartbeat); table 19 fs/affs/affs.h #define AFFS_BLOCK(sb, bh, blk) (AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)]) table 42 fs/affs/amigaffs.c hash_ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[offset]); table 55 fs/affs/amigaffs.c AFFS_HEAD(dir_bh)->table[offset] = cpu_to_be32(ino); table 94 fs/affs/amigaffs.c hash_ino = be32_to_cpu(AFFS_HEAD(bh)->table[offset]); table 99 fs/affs/amigaffs.c AFFS_HEAD(bh)->table[offset] = ino; table 247 fs/affs/amigaffs.c if (AFFS_HEAD(bh)->table[size]) table 83 fs/affs/amigaffs.h __be32 table[1]; table 89 fs/affs/dir.c ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]); table 106 fs/affs/dir.c ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]); table 144 fs/affs/inode.c inode->i_size = strlen((char *)AFFS_HEAD(bh)->table); table 183 fs/affs/namei.c key = be32_to_cpu(AFFS_HEAD(bh)->table[affs_hash_name(sb, dentry->d_name.name, dentry->d_name.len)]); table 342 fs/affs/namei.c p = (char *)AFFS_HEAD(bh)->table; table 85 fs/aio.c struct kioctx __rcu *table[]; table 332 fs/aio.c struct kioctx_table *table; table 337 fs/aio.c table = rcu_dereference(mm->ioctx_table); table 338 fs/aio.c for (i = 0; i < table->nr; i++) { table 341 fs/aio.c ctx = rcu_dereference(table->table[i]); table 637 fs/aio.c struct kioctx_table *table, *old; table 641 fs/aio.c table = rcu_dereference_raw(mm->ioctx_table); table 644 fs/aio.c if (table) table 645 fs/aio.c for (i = 0; i < table->nr; i++) table 646 fs/aio.c if (!rcu_access_pointer(table->table[i])) { table 648 fs/aio.c rcu_assign_pointer(table->table[i], ctx); table 661 fs/aio.c new_nr = (table ? table->nr : 1) * 4; table 664 fs/aio.c table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * table 666 fs/aio.c if (!table) table 669 fs/aio.c table->nr = new_nr; table 675 fs/aio.c rcu_assign_pointer(mm->ioctx_table, table); table 676 fs/aio.c } else if (table->nr > old->nr) { table 677 fs/aio.c memcpy(table->table, old->table, table 680 fs/aio.c rcu_assign_pointer(mm->ioctx_table, table); table 683 fs/aio.c kfree(table); table 684 fs/aio.c table = old; table 820 fs/aio.c struct kioctx_table *table; table 828 fs/aio.c table = rcu_dereference_raw(mm->ioctx_table); table 829 fs/aio.c WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); table 830 fs/aio.c RCU_INIT_POINTER(table->table[ctx->id], NULL); table 863 fs/aio.c struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table); table 867 fs/aio.c if (!table) table 870 fs/aio.c atomic_set(&wait.count, table->nr); table 874 fs/aio.c for (i = 0; i < table->nr; ++i) { table 876 fs/aio.c rcu_dereference_protected(table->table[i], true); table 900 fs/aio.c kfree(table); table 1053 fs/aio.c struct kioctx_table *table; table 1060 fs/aio.c table = rcu_dereference(mm->ioctx_table); table 1062 fs/aio.c if (!table || id >= table->nr) table 1065 fs/aio.c id = array_index_nospec(id, table->nr); table 1066 fs/aio.c ctx = rcu_dereference(table->table[id]); table 197 fs/btrfs/check-integrity.c struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE]; table 201 fs/btrfs/check-integrity.c struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE]; table 205 fs/btrfs/check-integrity.c struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE]; table 493 fs/btrfs/check-integrity.c INIT_LIST_HEAD(h->table + i); table 504 fs/btrfs/check-integrity.c list_add(&b->collision_resolving_node, h->table + hashval); 
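Note: the fs/btrfs/check-integrity.c entries around this point (init of h->table + i, list_add/list_for_each_entry on h->table + hashval) all use the same separate-chaining hash-table idiom: a fixed array of chain heads indexed by a hash of the block number. The following is a minimal, self-contained userspace C sketch of that idiom only; the struct names, bucket count and hash function here are invented stand-ins, not the kernel's own, and the kernel version chains with struct list_head rather than a singly linked next pointer.

/*
 * Userspace sketch of the separate-chaining pattern seen in
 * fs/btrfs/check-integrity.c.  All names and sizes are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

#define TBL_SIZE 64                      /* stand-in for BTRFSIC_*_HASHTABLE_SIZE */

struct node {
        unsigned long long blocknr;      /* lookup key */
        struct node *next;               /* collision chain */
};

struct hashtable {
        struct node *table[TBL_SIZE];    /* one chain head per bucket */
};

static unsigned int hash(unsigned long long blocknr)
{
        return (unsigned int)(blocknr % TBL_SIZE);
}

static void hashtable_init(struct hashtable *h)
{
        for (unsigned int i = 0; i < TBL_SIZE; i++)
                h->table[i] = NULL;      /* kernel code does INIT_LIST_HEAD() here */
}

static void hashtable_add(struct hashtable *h, struct node *n)
{
        unsigned int hashval = hash(n->blocknr);

        n->next = h->table[hashval];     /* push onto the bucket's chain */
        h->table[hashval] = n;
}

static struct node *hashtable_lookup(struct hashtable *h, unsigned long long blocknr)
{
        for (struct node *n = h->table[hash(blocknr)]; n; n = n->next)
                if (n->blocknr == blocknr)
                        return n;
        return NULL;
}

int main(void)
{
        struct hashtable h;
        struct node a = { .blocknr = 42 };
        struct node b = { .blocknr = 42 + TBL_SIZE };   /* collides with 'a' */

        hashtable_init(&h);
        hashtable_add(&h, &a);
        hashtable_add(&h, &b);
        printf("lookup(42): %s\n", hashtable_lookup(&h, 42) ? "found" : "missing");
        return 0;
}

The kernel variant keeps doubly linked list heads per bucket so entries can also be removed in O(1); the singly linked chain above only keeps the sketch short.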
table 523 fs/btrfs/check-integrity.c list_for_each_entry(b, h->table + hashval, collision_resolving_node) { table 537 fs/btrfs/check-integrity.c INIT_LIST_HEAD(h->table + i); table 553 fs/btrfs/check-integrity.c list_add(&l->collision_resolving_node, h->table + hashval); table 576 fs/btrfs/check-integrity.c list_for_each_entry(l, h->table + hashval, collision_resolving_node) { table 595 fs/btrfs/check-integrity.c INIT_LIST_HEAD(h->table + i); table 606 fs/btrfs/check-integrity.c list_add(&ds->collision_resolving_node, h->table + hashval); table 621 fs/btrfs/check-integrity.c list_for_each_entry(ds, h->table + hashval, collision_resolving_node) { table 51 fs/btrfs/raid56.c struct btrfs_stripe_hash table[]; table 203 fs/btrfs/raid56.c struct btrfs_stripe_hash_table *table; table 221 fs/btrfs/raid56.c table_size = sizeof(*table) + sizeof(*h) * num_entries; table 222 fs/btrfs/raid56.c table = kvzalloc(table_size, GFP_KERNEL); table 223 fs/btrfs/raid56.c if (!table) table 226 fs/btrfs/raid56.c spin_lock_init(&table->cache_lock); table 227 fs/btrfs/raid56.c INIT_LIST_HEAD(&table->stripe_cache); table 229 fs/btrfs/raid56.c h = table->table; table 237 fs/btrfs/raid56.c x = cmpxchg(&info->stripe_hash_table, NULL, table); table 348 fs/btrfs/raid56.c struct btrfs_stripe_hash_table *table; table 358 fs/btrfs/raid56.c table = rbio->fs_info->stripe_hash_table; table 359 fs/btrfs/raid56.c h = table->table + bucket; table 374 fs/btrfs/raid56.c table->cache_size -= 1; table 407 fs/btrfs/raid56.c struct btrfs_stripe_hash_table *table; table 413 fs/btrfs/raid56.c table = rbio->fs_info->stripe_hash_table; table 415 fs/btrfs/raid56.c spin_lock_irqsave(&table->cache_lock, flags); table 417 fs/btrfs/raid56.c spin_unlock_irqrestore(&table->cache_lock, flags); table 425 fs/btrfs/raid56.c struct btrfs_stripe_hash_table *table; table 429 fs/btrfs/raid56.c table = info->stripe_hash_table; table 431 fs/btrfs/raid56.c spin_lock_irqsave(&table->cache_lock, flags); table 432 fs/btrfs/raid56.c while (!list_empty(&table->stripe_cache)) { table 433 fs/btrfs/raid56.c rbio = list_entry(table->stripe_cache.next, table 438 fs/btrfs/raid56.c spin_unlock_irqrestore(&table->cache_lock, flags); table 467 fs/btrfs/raid56.c struct btrfs_stripe_hash_table *table; table 473 fs/btrfs/raid56.c table = rbio->fs_info->stripe_hash_table; table 475 fs/btrfs/raid56.c spin_lock_irqsave(&table->cache_lock, flags); table 483 fs/btrfs/raid56.c list_move(&rbio->stripe_cache, &table->stripe_cache); table 485 fs/btrfs/raid56.c list_add(&rbio->stripe_cache, &table->stripe_cache); table 486 fs/btrfs/raid56.c table->cache_size += 1; table 491 fs/btrfs/raid56.c if (table->cache_size > RBIO_CACHE_SIZE) { table 494 fs/btrfs/raid56.c found = list_entry(table->stripe_cache.prev, table 502 fs/btrfs/raid56.c spin_unlock_irqrestore(&table->cache_lock, flags); table 675 fs/btrfs/raid56.c struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; table 766 fs/btrfs/raid56.c h = rbio->fs_info->stripe_hash_table->table + bucket; table 78 fs/cifs/cifs_unicode.h signed char *table; table 337 fs/cifs/cifs_unicode.h return uc + rp->table[uc - rp->start]; table 379 fs/cifs/cifs_unicode.h return uc + rp->table[uc - rp->start]; table 168 fs/dcache.c int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, table 174 fs/dcache.c return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 49 fs/drop_caches.c int drop_caches_sysctl_handler(struct ctl_table *table, int write, table 54 fs/drop_caches.c ret = 
proc_dointvec_minmax(table, write, buffer, length, ppos); table 4666 fs/ext4/inode.c ext4_fsblk_t b, end, table; table 4670 fs/ext4/inode.c table = ext4_inode_table(sb, gdp); table 4673 fs/ext4/inode.c if (table > b) table 4674 fs/ext4/inode.c b = table; table 4679 fs/ext4/inode.c table += num / inodes_per_block; table 4680 fs/ext4/inode.c if (end > table) table 4681 fs/ext4/inode.c end = table; table 82 fs/file_table.c int proc_nr_files(struct ctl_table *table, int write, table 86 fs/file_table.c return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 89 fs/file_table.c int proc_nr_files(struct ctl_table *table, int write, table 2184 fs/fs-writeback.c int dirtytime_interval_handler(struct ctl_table *table, int write, table 2189 fs/fs-writeback.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 53 fs/fscache/main.c static int fscache_max_active_sysctl(struct ctl_table *table, int write, table 57 fs/fscache/main.c struct workqueue_struct **wqp = table->extra1; table 58 fs/fscache/main.c unsigned int *datap = table->data; table 61 fs/fscache/main.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 122 fs/gfs2/glock.h int (*lm_mount) (struct gfs2_sbd *sdp, const char *table); table 1195 fs/gfs2/lock_dlm.c static int gdlm_mount(struct gfs2_sbd *sdp, const char *table) table 1226 fs/gfs2/lock_dlm.c fsname = strchr(table, ':'); table 1233 fs/gfs2/lock_dlm.c memcpy(cluster, table, strlen(table) - strlen(fsname)); table 354 fs/gfs2/ops_fstype.c char *proto, *table; table 358 fs/gfs2/ops_fstype.c table = sdp->sd_args.ar_locktable; table 362 fs/gfs2/ops_fstype.c if (!proto[0] || !table[0]) { table 369 fs/gfs2/ops_fstype.c if (!table[0]) table 370 fs/gfs2/ops_fstype.c table = sdp->sd_sb.sb_locktable; table 373 fs/gfs2/ops_fstype.c if (!table[0]) table 374 fs/gfs2/ops_fstype.c table = sdp->sd_vfs->s_id; table 377 fs/gfs2/ops_fstype.c strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN); table 379 fs/gfs2/ops_fstype.c table = sdp->sd_table_name; table 380 fs/gfs2/ops_fstype.c while ((table = strchr(table, '/'))) table 381 fs/gfs2/ops_fstype.c *table = '_'; table 939 fs/gfs2/ops_fstype.c const char *table = sdp->sd_table_name; table 955 fs/gfs2/ops_fstype.c fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table); table 999 fs/gfs2/ops_fstype.c ret = lm->lm_mount(sdp, table); table 109 fs/inode.c int proc_nr_inodes(struct ctl_table *table, int write, table 114 fs/inode.c return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 635 fs/isofs/inode.c int table, error = -EINVAL; table 971 fs/isofs/inode.c table = 0; table 973 fs/isofs/inode.c table += 2; table 975 fs/isofs/inode.c table++; table 978 fs/isofs/inode.c if (table) table 979 fs/isofs/inode.c s->s_d_op = &isofs_dentry_ops[table - 1]; table 222 fs/jbd2/revoke.c struct jbd2_revoke_table_s *table; table 224 fs/jbd2/revoke.c table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL); table 225 fs/jbd2/revoke.c if (!table) table 231 fs/jbd2/revoke.c table->hash_size = hash_size; table 232 fs/jbd2/revoke.c table->hash_shift = shift; table 233 fs/jbd2/revoke.c table->hash_table = table 235 fs/jbd2/revoke.c if (!table->hash_table) { table 236 fs/jbd2/revoke.c kmem_cache_free(jbd2_revoke_table_cache, table); table 237 fs/jbd2/revoke.c table = NULL; table 242 fs/jbd2/revoke.c INIT_LIST_HEAD(&table->hash_table[tmp]); table 245 fs/jbd2/revoke.c return table; table 248 fs/jbd2/revoke.c static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table) table 253 
fs/jbd2/revoke.c for (i = 0; i < table->hash_size; i++) { table 254 fs/jbd2/revoke.c hash_list = &table->hash_table[i]; table 258 fs/jbd2/revoke.c kfree(table->hash_table); table 259 fs/jbd2/revoke.c kmem_cache_free(jbd2_revoke_table_cache, table); table 16 fs/jfs/jfs_unicode.h signed char *table; table 120 fs/jfs/jfs_unicode.h return uc + rp->table[uc - rp->start]; table 37 fs/lockd/host.c #define for_each_host(host, chain, table) \ table 38 fs/lockd/host.c for ((chain) = (table); \ table 39 fs/lockd/host.c (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ table 42 fs/lockd/host.c #define for_each_host_safe(host, next, chain, table) \ table 43 fs/lockd/host.c for ((chain) = (table); \ table 44 fs/lockd/host.c (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ table 700 fs/nfs/nfs4proc.c tbl = slot->table; table 727 fs/nfs/nfs4proc.c tbl = slot->table; table 792 fs/nfs/nfs4proc.c session = slot->table->session; table 812 fs/nfs/nfs4proc.c nfs41_update_target_slotid(slot->table, slot, res); table 847 fs/nfs/nfs4proc.c if (slot->slot_nr < slot->table->target_highest_slotid) table 922 fs/nfs/nfs4proc.c if (res->sr_slot->table->session != NULL) table 930 fs/nfs/nfs4proc.c if (res->sr_slot->table->session != NULL) table 941 fs/nfs/nfs4proc.c if (!res->sr_slot->table->session) table 111 fs/nfs/nfs4session.c slot->table = tbl; table 362 fs/nfs/nfs4session.c struct nfs4_slot_table *tbl = slot->table; table 21 fs/nfs/nfs4session.h struct nfs4_slot_table *table; table 554 fs/nfs/nfs4trace.h sa_slot->table->highest_used_slotid; table 1921 fs/nfs/nfs4xdr.c tp = slot->table; table 2107 fs/nfs/nfs4xdr.c struct nfs4_session *session = args->sa_slot->table->session; table 5730 fs/nfs/nfs4xdr.c if (!res->sr_slot->table->session) table 5744 fs/nfs/nfs4xdr.c session = res->sr_slot->table->session; table 580 fs/nls/mac-celtic.c static struct nls_table table = { table 590 fs/nls/mac-celtic.c return register_nls(&table); table 595 fs/nls/mac-celtic.c unregister_nls(&table); table 510 fs/nls/mac-centeuro.c static struct nls_table table = { table 520 fs/nls/mac-centeuro.c return register_nls(&table); table 525 fs/nls/mac-centeuro.c unregister_nls(&table); table 580 fs/nls/mac-croatian.c static struct nls_table table = { table 590 fs/nls/mac-croatian.c return register_nls(&table); table 595 fs/nls/mac-croatian.c unregister_nls(&table); table 475 fs/nls/mac-cyrillic.c static struct nls_table table = { table 485 fs/nls/mac-cyrillic.c return register_nls(&table); table 490 fs/nls/mac-cyrillic.c unregister_nls(&table); table 545 fs/nls/mac-gaelic.c static struct nls_table table = { table 555 fs/nls/mac-gaelic.c return register_nls(&table); table 560 fs/nls/mac-gaelic.c unregister_nls(&table); table 475 fs/nls/mac-greek.c static struct nls_table table = { table 485 fs/nls/mac-greek.c return register_nls(&table); table 490 fs/nls/mac-greek.c unregister_nls(&table); table 580 fs/nls/mac-iceland.c static struct nls_table table = { table 590 fs/nls/mac-iceland.c return register_nls(&table); table 595 fs/nls/mac-iceland.c unregister_nls(&table); table 510 fs/nls/mac-inuit.c static struct nls_table table = { table 520 fs/nls/mac-inuit.c return register_nls(&table); table 525 fs/nls/mac-inuit.c unregister_nls(&table); table 615 fs/nls/mac-roman.c static struct nls_table table = { table 625 fs/nls/mac-roman.c return register_nls(&table); table 630 fs/nls/mac-roman.c unregister_nls(&table); table 580 fs/nls/mac-romanian.c static struct nls_table table = { table 590 fs/nls/mac-romanian.c return register_nls(&table); table 595 
fs/nls/mac-romanian.c unregister_nls(&table); table 580 fs/nls/mac-turkish.c static struct nls_table table = { table 590 fs/nls/mac-turkish.c return register_nls(&table); table 595 fs/nls/mac-turkish.c unregister_nls(&table); table 145 fs/nls/nls_ascii.c static struct nls_table table = { table 155 fs/nls/nls_ascii.c return register_nls(&table); table 160 fs/nls/nls_ascii.c unregister_nls(&table); table 326 fs/nls/nls_cp1250.c static struct nls_table table = { table 336 fs/nls/nls_cp1250.c return register_nls(&table); table 340 fs/nls/nls_cp1250.c unregister_nls(&table); table 280 fs/nls/nls_cp1251.c static struct nls_table table = { table 290 fs/nls/nls_cp1251.c return register_nls(&table); table 295 fs/nls/nls_cp1251.c unregister_nls(&table); table 361 fs/nls/nls_cp1255.c static struct nls_table table = { table 372 fs/nls/nls_cp1255.c return register_nls(&table); table 377 fs/nls/nls_cp1255.c unregister_nls(&table); table 366 fs/nls/nls_cp437.c static struct nls_table table = { table 376 fs/nls/nls_cp437.c return register_nls(&table); table 381 fs/nls/nls_cp437.c unregister_nls(&table); table 329 fs/nls/nls_cp737.c static struct nls_table table = { table 339 fs/nls/nls_cp737.c return register_nls(&table); table 344 fs/nls/nls_cp737.c unregister_nls(&table); table 298 fs/nls/nls_cp775.c static struct nls_table table = { table 308 fs/nls/nls_cp775.c return register_nls(&table); table 313 fs/nls/nls_cp775.c unregister_nls(&table); table 294 fs/nls/nls_cp850.c static struct nls_table table = { table 304 fs/nls/nls_cp850.c return register_nls(&table); table 309 fs/nls/nls_cp850.c unregister_nls(&table); table 316 fs/nls/nls_cp852.c static struct nls_table table = { table 326 fs/nls/nls_cp852.c return register_nls(&table); table 331 fs/nls/nls_cp852.c unregister_nls(&table); table 278 fs/nls/nls_cp855.c static struct nls_table table = { table 288 fs/nls/nls_cp855.c return register_nls(&table); table 293 fs/nls/nls_cp855.c unregister_nls(&table); table 280 fs/nls/nls_cp857.c static struct nls_table table = { table 290 fs/nls/nls_cp857.c return register_nls(&table); table 295 fs/nls/nls_cp857.c unregister_nls(&table); table 343 fs/nls/nls_cp860.c static struct nls_table table = { table 353 fs/nls/nls_cp860.c return register_nls(&table); table 358 fs/nls/nls_cp860.c unregister_nls(&table); table 366 fs/nls/nls_cp861.c static struct nls_table table = { table 376 fs/nls/nls_cp861.c return register_nls(&table); table 381 fs/nls/nls_cp861.c unregister_nls(&table); table 400 fs/nls/nls_cp862.c static struct nls_table table = { table 410 fs/nls/nls_cp862.c return register_nls(&table); table 415 fs/nls/nls_cp862.c unregister_nls(&table); table 360 fs/nls/nls_cp863.c static struct nls_table table = { table 370 fs/nls/nls_cp863.c return register_nls(&table); table 375 fs/nls/nls_cp863.c unregister_nls(&table); table 386 fs/nls/nls_cp864.c static struct nls_table table = { table 396 fs/nls/nls_cp864.c return register_nls(&table); table 401 fs/nls/nls_cp864.c unregister_nls(&table); table 366 fs/nls/nls_cp865.c static struct nls_table table = { table 376 fs/nls/nls_cp865.c return register_nls(&table); table 381 fs/nls/nls_cp865.c unregister_nls(&table); table 284 fs/nls/nls_cp866.c static struct nls_table table = { table 294 fs/nls/nls_cp866.c return register_nls(&table); table 299 fs/nls/nls_cp866.c unregister_nls(&table); table 294 fs/nls/nls_cp869.c static struct nls_table table = { table 304 fs/nls/nls_cp869.c return register_nls(&table); table 309 fs/nls/nls_cp869.c unregister_nls(&table); table 252 
fs/nls/nls_cp874.c static struct nls_table table = { table 263 fs/nls/nls_cp874.c return register_nls(&table); table 268 fs/nls/nls_cp874.c unregister_nls(&table); table 7910 fs/nls/nls_cp932.c static struct nls_table table = { table 7921 fs/nls/nls_cp932.c return register_nls(&table); table 7926 fs/nls/nls_cp932.c unregister_nls(&table); table 11088 fs/nls/nls_cp936.c static struct nls_table table = { table 11099 fs/nls/nls_cp936.c return register_nls(&table); table 11104 fs/nls/nls_cp936.c unregister_nls(&table); table 13923 fs/nls/nls_cp949.c static struct nls_table table = { table 13934 fs/nls/nls_cp949.c return register_nls(&table); table 13939 fs/nls/nls_cp949.c unregister_nls(&table); table 9459 fs/nls/nls_cp950.c static struct nls_table table = { table 9470 fs/nls/nls_cp950.c return register_nls(&table); table 9475 fs/nls/nls_cp950.c unregister_nls(&table); table 552 fs/nls/nls_euc-jp.c static struct nls_table table = { table 563 fs/nls/nls_euc-jp.c table.charset2upper = p_nls->charset2upper; table 564 fs/nls/nls_euc-jp.c table.charset2lower = p_nls->charset2lower; table 565 fs/nls/nls_euc-jp.c return register_nls(&table); table 573 fs/nls/nls_euc-jp.c unregister_nls(&table); table 236 fs/nls/nls_iso8859-1.c static struct nls_table table = { table 246 fs/nls/nls_iso8859-1.c return register_nls(&table); table 251 fs/nls/nls_iso8859-1.c unregister_nls(&table); table 264 fs/nls/nls_iso8859-13.c static struct nls_table table = { table 274 fs/nls/nls_iso8859-13.c return register_nls(&table); table 279 fs/nls/nls_iso8859-13.c unregister_nls(&table); table 320 fs/nls/nls_iso8859-14.c static struct nls_table table = { table 330 fs/nls/nls_iso8859-14.c return register_nls(&table); table 335 fs/nls/nls_iso8859-14.c unregister_nls(&table); table 286 fs/nls/nls_iso8859-15.c static struct nls_table table = { table 296 fs/nls/nls_iso8859-15.c return register_nls(&table); table 301 fs/nls/nls_iso8859-15.c unregister_nls(&table); table 287 fs/nls/nls_iso8859-2.c static struct nls_table table = { table 297 fs/nls/nls_iso8859-2.c return register_nls(&table); table 302 fs/nls/nls_iso8859-2.c unregister_nls(&table); table 287 fs/nls/nls_iso8859-3.c static struct nls_table table = { table 297 fs/nls/nls_iso8859-3.c return register_nls(&table); table 302 fs/nls/nls_iso8859-3.c unregister_nls(&table); table 287 fs/nls/nls_iso8859-4.c static struct nls_table table = { table 297 fs/nls/nls_iso8859-4.c return register_nls(&table); table 302 fs/nls/nls_iso8859-4.c unregister_nls(&table); table 251 fs/nls/nls_iso8859-5.c static struct nls_table table = { table 261 fs/nls/nls_iso8859-5.c return register_nls(&table); table 266 fs/nls/nls_iso8859-5.c unregister_nls(&table); table 242 fs/nls/nls_iso8859-6.c static struct nls_table table = { table 252 fs/nls/nls_iso8859-6.c return register_nls(&table); table 257 fs/nls/nls_iso8859-6.c unregister_nls(&table); table 296 fs/nls/nls_iso8859-7.c static struct nls_table table = { table 306 fs/nls/nls_iso8859-7.c return register_nls(&table); table 311 fs/nls/nls_iso8859-7.c unregister_nls(&table); table 251 fs/nls/nls_iso8859-9.c static struct nls_table table = { table 261 fs/nls/nls_iso8859-9.c return register_nls(&table); table 266 fs/nls/nls_iso8859-9.c unregister_nls(&table); table 302 fs/nls/nls_koi8-r.c static struct nls_table table = { table 312 fs/nls/nls_koi8-r.c return register_nls(&table); table 317 fs/nls/nls_koi8-r.c unregister_nls(&table); table 54 fs/nls/nls_koi8-ru.c static struct nls_table table = { table 65 fs/nls/nls_koi8-ru.c table.charset2upper = 
p_nls->charset2upper; table 66 fs/nls/nls_koi8-ru.c table.charset2lower = p_nls->charset2lower; table 67 fs/nls/nls_koi8-ru.c return register_nls(&table); table 75 fs/nls/nls_koi8-ru.c unregister_nls(&table); table 309 fs/nls/nls_koi8-u.c static struct nls_table table = { table 319 fs/nls/nls_koi8-u.c return register_nls(&table); table 324 fs/nls/nls_koi8-u.c unregister_nls(&table); table 43 fs/nls/nls_utf8.c static struct nls_table table = { table 57 fs/nls/nls_utf8.c return register_nls(&table); table 62 fs/nls/nls_utf8.c unregister_nls(&table); table 188 fs/proc/proc_sysctl.c struct ctl_node *node, struct ctl_table *table) table 190 fs/proc/proc_sysctl.c head->ctl_table = table; table 191 fs/proc/proc_sysctl.c head->ctl_table_arg = table; table 203 fs/proc/proc_sysctl.c for (entry = table; entry->procname; entry++, node++) table 450 fs/proc/proc_sysctl.c static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, int op) table 456 fs/proc/proc_sysctl.c mode = root->permissions(head, table); table 458 fs/proc/proc_sysctl.c mode = table->mode; table 464 fs/proc/proc_sysctl.c struct ctl_table_header *head, struct ctl_table *table) table 485 fs/proc/proc_sysctl.c ei->sysctl_entry = table; table 491 fs/proc/proc_sysctl.c inode->i_mode = table->mode; table 492 fs/proc/proc_sysctl.c if (!S_ISDIR(table->mode)) { table 505 fs/proc/proc_sysctl.c root->set_ownership(head, table, &inode->i_uid, &inode->i_gid); table 580 fs/proc/proc_sysctl.c struct ctl_table *table = PROC_I(inode)->sysctl_entry; table 592 fs/proc/proc_sysctl.c if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ)) table 597 fs/proc/proc_sysctl.c if (!table->proc_handler) table 600 fs/proc/proc_sysctl.c error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, &count, table 611 fs/proc/proc_sysctl.c error = table->proc_handler(table, write, (void __user *)new_buf, table 616 fs/proc/proc_sysctl.c error = table->proc_handler(table, write, buf, &count, ppos); table 642 fs/proc/proc_sysctl.c struct ctl_table *table = PROC_I(inode)->sysctl_entry; table 648 fs/proc/proc_sysctl.c if (table->poll) table 649 fs/proc/proc_sysctl.c filp->private_data = proc_sys_poll_event(table->poll); table 660 fs/proc/proc_sysctl.c struct ctl_table *table = PROC_I(inode)->sysctl_entry; table 668 fs/proc/proc_sysctl.c if (!table->proc_handler) table 671 fs/proc/proc_sysctl.c if (!table->poll) table 675 fs/proc/proc_sysctl.c poll_wait(filp, &table->poll->wait, wait); table 677 fs/proc/proc_sysctl.c if (event != atomic_read(&table->poll->event)) { table 678 fs/proc/proc_sysctl.c filp->private_data = proc_sys_poll_event(table->poll); table 691 fs/proc/proc_sysctl.c struct ctl_table *table) table 699 fs/proc/proc_sysctl.c qname.name = table->procname; table 700 fs/proc/proc_sysctl.c qname.len = strlen(table->procname); table 711 fs/proc/proc_sysctl.c inode = proc_sys_make_inode(dir->d_sb, head, table); table 740 fs/proc/proc_sysctl.c struct ctl_table *table) table 749 fs/proc/proc_sysctl.c if (sysctl_follow_link(&head, &table)) table 752 fs/proc/proc_sysctl.c ret = proc_sys_fill_cache(file, ctx, head, table); table 758 fs/proc/proc_sysctl.c static int scan(struct ctl_table_header *head, struct ctl_table *table, table 767 fs/proc/proc_sysctl.c if (unlikely(S_ISLNK(table->mode))) table 768 fs/proc/proc_sysctl.c res = proc_sys_link_fill_cache(file, ctx, head, table); table 770 fs/proc/proc_sysctl.c res = proc_sys_fill_cache(file, ctx, head, table); table 814 fs/proc/proc_sysctl.c struct ctl_table *table; table 825 fs/proc/proc_sysctl.c 
table = PROC_I(inode)->sysctl_entry; table 826 fs/proc/proc_sysctl.c if (!table) /* global root - r-xr-xr-x */ table 829 fs/proc/proc_sysctl.c error = sysctl_perm(head, table, mask & ~MAY_NOT_BLOCK); table 857 fs/proc/proc_sysctl.c struct ctl_table *table = PROC_I(inode)->sysctl_entry; table 863 fs/proc/proc_sysctl.c if (table) table 864 fs/proc/proc_sysctl.c stat->mode = (stat->mode & S_IFMT) | table->mode; table 967 fs/proc/proc_sysctl.c struct ctl_table *table; table 979 fs/proc/proc_sysctl.c table = (struct ctl_table *)(node + 1); table 980 fs/proc/proc_sysctl.c new_name = (char *)(table + 2); table 983 fs/proc/proc_sysctl.c table[0].procname = new_name; table 984 fs/proc/proc_sysctl.c table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO; table 985 fs/proc/proc_sysctl.c init_header(&new->header, set->dir.header.root, set, node, table); table 1099 fs/proc/proc_sysctl.c static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...) table 1109 fs/proc/proc_sysctl.c path, table->procname, &vaf); table 1115 fs/proc/proc_sysctl.c static int sysctl_check_table_array(const char *path, struct ctl_table *table) table 1119 fs/proc/proc_sysctl.c if ((table->proc_handler == proc_douintvec) || table 1120 fs/proc/proc_sysctl.c (table->proc_handler == proc_douintvec_minmax)) { table 1121 fs/proc/proc_sysctl.c if (table->maxlen != sizeof(unsigned int)) table 1122 fs/proc/proc_sysctl.c err |= sysctl_err(path, table, "array not allowed"); table 1128 fs/proc/proc_sysctl.c static int sysctl_check_table(const char *path, struct ctl_table *table) table 1131 fs/proc/proc_sysctl.c for (; table->procname; table++) { table 1132 fs/proc/proc_sysctl.c if (table->child) table 1133 fs/proc/proc_sysctl.c err |= sysctl_err(path, table, "Not a file"); table 1135 fs/proc/proc_sysctl.c if ((table->proc_handler == proc_dostring) || table 1136 fs/proc/proc_sysctl.c (table->proc_handler == proc_dointvec) || table 1137 fs/proc/proc_sysctl.c (table->proc_handler == proc_douintvec) || table 1138 fs/proc/proc_sysctl.c (table->proc_handler == proc_douintvec_minmax) || table 1139 fs/proc/proc_sysctl.c (table->proc_handler == proc_dointvec_minmax) || table 1140 fs/proc/proc_sysctl.c (table->proc_handler == proc_dointvec_jiffies) || table 1141 fs/proc/proc_sysctl.c (table->proc_handler == proc_dointvec_userhz_jiffies) || table 1142 fs/proc/proc_sysctl.c (table->proc_handler == proc_dointvec_ms_jiffies) || table 1143 fs/proc/proc_sysctl.c (table->proc_handler == proc_doulongvec_minmax) || table 1144 fs/proc/proc_sysctl.c (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { table 1145 fs/proc/proc_sysctl.c if (!table->data) table 1146 fs/proc/proc_sysctl.c err |= sysctl_err(path, table, "No data"); table 1147 fs/proc/proc_sysctl.c if (!table->maxlen) table 1148 fs/proc/proc_sysctl.c err |= sysctl_err(path, table, "No maxlen"); table 1150 fs/proc/proc_sysctl.c err |= sysctl_check_table_array(path, table); table 1152 fs/proc/proc_sysctl.c if (!table->proc_handler) table 1153 fs/proc/proc_sysctl.c err |= sysctl_err(path, table, "No proc_handler"); table 1155 fs/proc/proc_sysctl.c if ((table->mode & (S_IRUGO|S_IWUGO)) != table->mode) table 1156 fs/proc/proc_sysctl.c err |= sysctl_err(path, table, "bogus .mode 0%o", table 1157 fs/proc/proc_sysctl.c table->mode); table 1162 fs/proc/proc_sysctl.c static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table, table 1173 fs/proc/proc_sysctl.c for (entry = table; entry->procname; entry++) { table 1191 fs/proc/proc_sysctl.c for (link = link_table, entry = 
table; entry->procname; link++, entry++) { table 1206 fs/proc/proc_sysctl.c struct ctl_table *table, struct ctl_table_root *link_root) table 1212 fs/proc/proc_sysctl.c for (entry = table; entry->procname; entry++) { table 1225 fs/proc/proc_sysctl.c for (entry = table; entry->procname; entry++) { table 1318 fs/proc/proc_sysctl.c const char *path, struct ctl_table *table) table 1328 fs/proc/proc_sysctl.c for (entry = table; entry->procname; entry++) table 1337 fs/proc/proc_sysctl.c init_header(header, root, set, node, table); table 1338 fs/proc/proc_sysctl.c if (sysctl_check_table(path, table)) table 1393 fs/proc/proc_sysctl.c struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table) table 1396 fs/proc/proc_sysctl.c path, table); table 1413 fs/proc/proc_sysctl.c static int count_subheaders(struct ctl_table *table) table 1420 fs/proc/proc_sysctl.c if (!table || !table->procname) table 1423 fs/proc/proc_sysctl.c for (entry = table; entry->procname; entry++) { table 1434 fs/proc/proc_sysctl.c struct ctl_table *table) table 1442 fs/proc/proc_sysctl.c for (entry = table; entry->procname; entry++) { table 1449 fs/proc/proc_sysctl.c files = table; table 1459 fs/proc/proc_sysctl.c for (new = files, entry = table; entry->procname; entry++) { table 1483 fs/proc/proc_sysctl.c for (entry = table; entry->procname; entry++) { table 1519 fs/proc/proc_sysctl.c const struct ctl_path *path, struct ctl_table *table) table 1521 fs/proc/proc_sysctl.c struct ctl_table *ctl_table_arg = table; table 1522 fs/proc/proc_sysctl.c int nr_subheaders = count_subheaders(table); table 1537 fs/proc/proc_sysctl.c while (table->procname && table->child && !table[1].procname) { table 1538 fs/proc/proc_sysctl.c pos = append_path(new_path, pos, table->procname); table 1541 fs/proc/proc_sysctl.c table = table->child; table 1544 fs/proc/proc_sysctl.c header = __register_sysctl_table(set, new_path, table); table 1558 fs/proc/proc_sysctl.c set, table)) table 1569 fs/proc/proc_sysctl.c struct ctl_table *table = subh->ctl_table_arg; table 1571 fs/proc/proc_sysctl.c kfree(table); table 1589 fs/proc/proc_sysctl.c struct ctl_table *table) table 1592 fs/proc/proc_sysctl.c path, table); table 1605 fs/proc/proc_sysctl.c struct ctl_table_header *register_sysctl_table(struct ctl_table *table) table 1609 fs/proc/proc_sysctl.c return register_sysctl_paths(null_path, table); table 1689 fs/proc/proc_sysctl.c struct ctl_table *table = subh->ctl_table_arg; table 1691 fs/proc/proc_sysctl.c kfree(table); table 2862 fs/quota/dquot.c static int do_proc_dqstats(struct ctl_table *table, int write, table 2865 fs/quota/dquot.c unsigned int type = (unsigned long *)table->data - dqstats.stat; table 2875 fs/quota/dquot.c return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 429 fs/reiserfs/journal.c **table, table 433 fs/reiserfs/journal.c cn = journal_hash(table, sb, bl); table 519 fs/reiserfs/journal.c static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, table 524 fs/reiserfs/journal.c cn_orig = journal_hash(table, cn->sb, cn->blocknr); table 530 fs/reiserfs/journal.c journal_hash(table, cn->sb, cn->blocknr) = cn; table 1841 fs/reiserfs/journal.c struct reiserfs_journal_cnode **table, table 1848 fs/reiserfs/journal.c head = &(journal_hash(table, sb, block)); table 103 fs/select.c #define POLL_TABLE_FULL(table) \ table 104 fs/select.c ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table)) table 127 fs/select.c pwq->table = NULL; table 140 fs/select.c struct poll_table_page * 
p = pwq->table; table 162 fs/select.c struct poll_table_page *table = p->table; table 167 fs/select.c if (!table || POLL_TABLE_FULL(table)) { table 176 fs/select.c new_table->next = table; table 177 fs/select.c p->table = new_table; table 178 fs/select.c table = new_table; table 181 fs/select.c return table->entry++; table 479 fs/select.c struct poll_wqueues table; table 494 fs/select.c poll_initwait(&table); table 495 fs/select.c wait = &table.pt; table 577 fs/select.c if (table.error) { table 578 fs/select.c retval = table.error; table 603 fs/select.c if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE, table 608 fs/select.c poll_freewait(&table); table 963 fs/select.c struct poll_wqueues table; table 1000 fs/select.c poll_initwait(&table); table 1001 fs/select.c fdcount = do_poll(head, &table, end_time); table 1002 fs/select.c poll_freewait(&table); table 410 fs/squashfs/cache.c void *table, *buffer, **data; table 413 fs/squashfs/cache.c table = buffer = kmalloc(length, GFP_KERNEL); table 414 fs/squashfs/cache.c if (table == NULL) table 441 fs/squashfs/cache.c return table; table 446 fs/squashfs/cache.c kfree(table); table 114 fs/squashfs/export.c __le64 *table; table 131 fs/squashfs/export.c table = squashfs_read_table(sb, lookup_table_start, length); table 137 fs/squashfs/export.c if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) { table 138 fs/squashfs/export.c kfree(table); table 142 fs/squashfs/export.c return table; table 67 fs/squashfs/fragment.c __le64 *table; table 77 fs/squashfs/fragment.c table = squashfs_read_table(sb, fragment_table_start, length); table 83 fs/squashfs/fragment.c if (!IS_ERR(table) && le64_to_cpu(table[0]) >= fragment_table_start) { table 84 fs/squashfs/fragment.c kfree(table); table 88 fs/squashfs/fragment.c return table; table 59 fs/squashfs/id.c __le64 *table; table 77 fs/squashfs/id.c table = squashfs_read_table(sb, id_table_start, length); table 83 fs/squashfs/id.c if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) { table 84 fs/squashfs/id.c kfree(table); table 88 fs/squashfs/id.c return table; table 358 fs/udf/balloc.c struct inode *table, table 383 fs/udf/balloc.c iinfo = UDF_I(table); table 395 fs/udf/balloc.c (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { table 413 fs/udf/balloc.c udf_write_aext(table, &oepos, &eloc, elen, 1); table 432 fs/udf/balloc.c udf_write_aext(table, &oepos, &eloc, elen, 1); table 478 fs/udf/balloc.c udf_setup_indirect_aext(table, eloc.logicalBlockNum, table 487 fs/udf/balloc.c __udf_add_aext(table, &epos, &eloc, elen, 1); table 499 fs/udf/balloc.c struct inode *table, uint16_t partition, table 513 fs/udf/balloc.c iinfo = UDF_I(table); table 528 fs/udf/balloc.c (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { table 542 fs/udf/balloc.c udf_write_aext(table, &epos, &eloc, table 545 fs/udf/balloc.c udf_delete_aext(table, epos); table 559 fs/udf/balloc.c struct inode *table, uint16_t partition, table 570 fs/udf/balloc.c struct udf_inode_info *iinfo = UDF_I(table); table 595 fs/udf/balloc.c (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { table 639 fs/udf/balloc.c udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1); table 641 fs/udf/balloc.c udf_delete_aext(table, goal_epos); table 2469 fs/udf/super.c struct inode *table) table 2478 fs/udf/super.c epos.block = UDF_I(table)->i_location; table 2482 fs/udf/super.c while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) table 2483 fs/udf/super.c accum += (elen >> 
table->i_sb->s_blocksize_bits); table 763 fs/ufs/balloc.c unsigned char *table, unsigned char mask) table 778 fs/ufs/balloc.c while ((table[*cp++] & mask) == 0 && --rest) table 238 fs/unicode/utf8-selftest.c struct unicode_map *table = utf8_load("12.1.0"); table 240 fs/unicode/utf8-selftest.c if (IS_ERR(table)) { table 252 fs/unicode/utf8-selftest.c test_f(!utf8_strncmp(table, &s1, &s2), table 262 fs/unicode/utf8-selftest.c test_f(!utf8_strncasecmp(table, &s1, &s2), table 266 fs/unicode/utf8-selftest.c utf8_unload(table); table 381 include/acpi/acpiosxf.h struct acpi_table_header **table, table 388 include/acpi/acpiosxf.h struct acpi_table_header **table, table 395 include/acpi/acpiosxf.h struct acpi_table_header **table); table 461 include/acpi/acpixf.h acpi_load_table(struct acpi_table_header *table)) table 487 include/acpi/acpixf.h ACPI_EXTERNAL_RETURN_VOID(void acpi_put_table(struct acpi_table_header *table)) table 1070 include/acpi/actypes.h acpi_status (*acpi_table_handler) (u32 event, void *table, void *context); table 183 include/asm-generic/tlb.h extern void tlb_remove_table(struct mmu_gather *tlb, void *table); table 48 include/drm/drm_hashtab.h struct hlist_head *table; table 137 include/linux/acpi.h typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); table 596 include/linux/acpi.h char *table; table 645 include/linux/acpi.h int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count); table 1100 include/linux/acpi.h #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ table 1102 include/linux/acpi.h __used __section(__##table##_acpi_probe_table) \ table 1215 include/linux/acpi.h #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ table 150 include/linux/atalk.h struct aarp_entry **table; table 429 include/linux/bio.h extern void bio_init(struct bio *bio, struct bio_vec *table, table 122 include/linux/bpf-cgroup.h struct ctl_table *table, int write, table 287 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \ table 291 include/linux/bpf-cgroup.h __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ table 399 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) table 452 include/linux/clk-provider.h const struct clk_div_table *table; table 472 include/linux/clk-provider.h unsigned int val, const struct clk_div_table *table, table 476 include/linux/clk-provider.h const struct clk_div_table *table, table 480 include/linux/clk-provider.h const struct clk_div_table *table, u8 width, table 483 include/linux/clk-provider.h const struct clk_div_table *table, u8 width, table 497 include/linux/clk-provider.h u8 clk_divider_flags, const struct clk_div_table *table, table 502 include/linux/clk-provider.h u8 clk_divider_flags, const struct clk_div_table *table, table 539 include/linux/clk-provider.h u32 *table; table 573 include/linux/clk-provider.h u8 clk_mux_flags, u32 *table, spinlock_t *lock); table 578 include/linux/clk-provider.h u8 clk_mux_flags, u32 *table, spinlock_t *lock); table 580 include/linux/clk-provider.h int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, table 582 include/linux/clk-provider.h unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index); table 854 include/linux/clk-provider.h const struct clk_div_table *table, table 858 include/linux/clk-provider.h rate, prate, table, width, flags); table 863 
include/linux/clk-provider.h const struct clk_div_table *table, table 868 include/linux/clk-provider.h rate, prate, table, width, flags, table 88 include/linux/compaction.h extern int sysctl_compaction_handler(struct ctl_table *table, int write, table 73 include/linux/console.h const unsigned char *table); table 631 include/linux/cpufreq.h struct cpufreq_frequency_table **table); table 633 include/linux/cpufreq.h struct cpufreq_frequency_table **table); table 637 include/linux/cpufreq.h **table) table 644 include/linux/cpufreq.h **table) table 655 include/linux/cpufreq.h #define cpufreq_for_each_entry(pos, table) \ table 656 include/linux/cpufreq.h for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) table 666 include/linux/cpufreq.h #define cpufreq_for_each_entry_idx(pos, table, idx) \ table 667 include/linux/cpufreq.h for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \ table 677 include/linux/cpufreq.h #define cpufreq_for_each_valid_entry(pos, table) \ table 678 include/linux/cpufreq.h for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \ table 691 include/linux/cpufreq.h #define cpufreq_for_each_valid_entry_idx(pos, table, idx) \ table 692 include/linux/cpufreq.h cpufreq_for_each_entry_idx(pos, table, idx) \ table 699 include/linux/cpufreq.h struct cpufreq_frequency_table *table); table 702 include/linux/cpufreq.h struct cpufreq_frequency_table *table); table 723 include/linux/cpufreq.h struct cpufreq_frequency_table *table = policy->freq_table; table 728 include/linux/cpufreq.h cpufreq_for_each_valid_entry_idx(pos, table, idx) { table 744 include/linux/cpufreq.h struct cpufreq_frequency_table *table = policy->freq_table; table 749 include/linux/cpufreq.h cpufreq_for_each_valid_entry_idx(pos, table, idx) { table 786 include/linux/cpufreq.h struct cpufreq_frequency_table *table = policy->freq_table; table 791 include/linux/cpufreq.h cpufreq_for_each_valid_entry_idx(pos, table, idx) { table 816 include/linux/cpufreq.h struct cpufreq_frequency_table *table = policy->freq_table; table 821 include/linux/cpufreq.h cpufreq_for_each_valid_entry_idx(pos, table, idx) { table 849 include/linux/cpufreq.h struct cpufreq_frequency_table *table = policy->freq_table; table 854 include/linux/cpufreq.h cpufreq_for_each_valid_entry_idx(pos, table, idx) { table 870 include/linux/cpufreq.h if (target_freq - table[best].frequency > freq - target_freq) table 883 include/linux/cpufreq.h struct cpufreq_frequency_table *table = policy->freq_table; table 888 include/linux/cpufreq.h cpufreq_for_each_valid_entry_idx(pos, table, idx) { table 904 include/linux/cpufreq.h if (table[best].frequency - target_freq > target_freq - freq) table 1002 include/linux/cpufreq.h struct cpufreq_frequency_table *table, table 55 include/linux/crc8.h void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); table 73 include/linux/crc8.h void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); table 99 include/linux/crc8.h u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc); table 22 include/linux/devcoredump.h static inline void _devcd_free_sgtable(struct scatterlist *table) table 30 include/linux/devcoredump.h iter = table; table 31 include/linux/devcoredump.h for_each_sg(table, iter, sg_nents(table), i) { table 38 include/linux/devcoredump.h iter = table; table 39 include/linux/devcoredump.h delete_iter = table; /* always points on a head of a table */ table 64 include/linux/devcoredump.h void dev_coredumpsg(struct device *dev, struct scatterlist *table, table 
83 include/linux/devcoredump.h static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table, table 86 include/linux/devcoredump.h _devcd_free_sgtable(table); table 252 include/linux/device-mapper.h struct dm_table *table; table 9 include/linux/efi-bgrt.h void efi_bgrt_init(struct acpi_table_header *table); table 10 include/linux/efi-bgrt.h int __init acpi_parse_bgrt(struct acpi_table_header *table); table 18 include/linux/efi-bgrt.h static inline void efi_bgrt_init(struct acpi_table_header *table) {} table 19 include/linux/efi-bgrt.h static inline int __init acpi_parse_bgrt(struct acpi_table_header *table) table 700 include/linux/efi.h u64 table; table 705 include/linux/efi.h u32 table; table 710 include/linux/efi.h unsigned long table; table 38 include/linux/energy_model.h struct em_cap_state *table; table 93 include/linux/energy_model.h cs = &pd->table[pd->nr_cap_states - 1]; table 101 include/linux/energy_model.h cs = &pd->table[i]; table 1213 include/linux/filter.h struct ctl_table *table; table 3507 include/linux/fs.h int proc_nr_files(struct ctl_table *table, int write, table 3509 include/linux/fs.h int proc_nr_dentry(struct ctl_table *table, int write, table 3511 include/linux/fs.h int proc_nr_inodes(struct ctl_table *table, int write, table 83 include/linux/ftrace.h ftrace_enable_sysctl(struct ctl_table *table, int write, table 246 include/linux/ftrace.h int stack_trace_sysctl(struct ctl_table *table, int write, table 896 include/linux/ftrace.h int tracepoint_printk_sysctl(struct ctl_table *table, int write, table 43 include/linux/gpio/machine.h struct gpiod_lookup table[]; table 96 include/linux/gpio/machine.h void gpiod_add_lookup_table(struct gpiod_lookup_table *table); table 98 include/linux/gpio/machine.h void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); table 102 include/linux/gpio/machine.h void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} table 106 include/linux/gpio/machine.h void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} table 565 include/linux/intel-iommu.h struct list_head table; /* link to pasid table */ table 314 include/linux/kprobes.h extern int proc_kprobes_optimization_handler(struct ctl_table *table, table 41 include/linux/latencytop.h extern int sysctl_latencytop(struct ctl_table *table, int write, table 107 include/linux/lz4.h unsigned long long table[LZ4_STREAMSIZE_U64]; table 132 include/linux/lz4.h size_t table[LZ4_STREAMHCSIZE_SIZET]; table 149 include/linux/lz4.h unsigned long long table[LZ4_STREAMDECODESIZE_U64]; table 455 include/linux/mtd/spinand.h const struct spinand_info *table, table 716 include/linux/netdevice.h static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, table 719 include/linux/netdevice.h if (table && hash) { table 720 include/linux/netdevice.h unsigned int index = hash & table->mask; table 726 include/linux/netdevice.h if (table->ents[index] != val) table 727 include/linux/netdevice.h table->ents[index] = val; table 94 include/linux/netfilter/x_tables.h const char *table; table 125 include/linux/netfilter/x_tables.h const char *table; table 169 include/linux/netfilter/x_tables.h const char *table; table 210 include/linux/netfilter/x_tables.h const char *table; table 309 include/linux/netfilter/x_tables.h const struct xt_table *table, table 312 include/linux/netfilter/x_tables.h void *xt_unregister_table(struct xt_table *table); table 314 include/linux/netfilter/x_tables.h struct xt_table_info *xt_replace_table(struct xt_table *table, table 52 
include/linux/netfilter_arp/arp_tables.h int arpt_register_table(struct net *net, const struct xt_table *table, table 55 include/linux/netfilter_arp/arp_tables.h void arpt_unregister_table(struct net *net, struct xt_table *table, table 59 include/linux/netfilter_arp/arp_tables.h struct xt_table *table); table 27 include/linux/netfilter_bridge/ebtables.h bool (*checkentry)(const char *table, const void *entry, table 44 include/linux/netfilter_bridge/ebtables.h bool (*checkentry)(const char *table, const void *entry, table 62 include/linux/netfilter_bridge/ebtables.h bool (*checkentry)(const char *table, const void *entry, table 94 include/linux/netfilter_bridge/ebtables.h struct ebt_replace_kernel *table; table 110 include/linux/netfilter_bridge/ebtables.h const struct ebt_table *table, table 113 include/linux/netfilter_bridge/ebtables.h extern void ebt_unregister_table(struct net *net, struct ebt_table *table, table 117 include/linux/netfilter_bridge/ebtables.h struct ebt_table *table); table 25 include/linux/netfilter_ipv4/ip_tables.h int ipt_register_table(struct net *net, const struct xt_table *table, table 28 include/linux/netfilter_ipv4/ip_tables.h void ipt_unregister_table(struct net *net, struct xt_table *table, table 67 include/linux/netfilter_ipv4/ip_tables.h struct xt_table *table); table 27 include/linux/netfilter_ipv6/ip6_tables.h int ip6t_register_table(struct net *net, const struct xt_table *table, table 30 include/linux/netfilter_ipv6/ip6_tables.h void ip6t_unregister_table(struct net *net, struct xt_table *table, table 34 include/linux/netfilter_ipv6/ip6_tables.h struct xt_table *table); table 107 include/linux/nvmem-provider.h void nvmem_add_cell_table(struct nvmem_cell_table *table); table 108 include/linux/nvmem-provider.h void nvmem_del_cell_table(struct nvmem_cell_table *table); table 131 include/linux/nvmem-provider.h static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {} table 132 include/linux/nvmem-provider.h static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {} table 1282 include/linux/of.h #define _OF_DECLARE(table, name, compat, fn, fn_type) \ table 1284 include/linux/of.h __used __section(__##table##_of_table) \ table 1288 include/linux/of.h #define _OF_DECLARE(table, name, compat, fn, fn_type) \ table 1299 include/linux/of.h #define OF_DECLARE_1(table, name, compat, fn) \ table 1300 include/linux/of.h _OF_DECLARE(table, name, compat, fn, of_init_fn_1) table 1301 include/linux/of.h #define OF_DECLARE_1_RET(table, name, compat, fn) \ table 1302 include/linux/of.h _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret) table 1303 include/linux/of.h #define OF_DECLARE_2(table, name, compat, fn) \ table 1304 include/linux/of.h _OF_DECLARE(table, name, compat, fn, of_init_fn_2) table 776 include/linux/page-flags.h PAGE_TYPE_OPS(Table, table) table 29 include/linux/parser.h int match_token(char *, const match_table_t table, substring_t args[]); table 1234 include/linux/perf_event.h extern int perf_proc_update_handler(struct ctl_table *table, int write, table 1237 include/linux/perf_event.h extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, table 1241 include/linux/perf_event.h int perf_event_max_stack_handler(struct ctl_table *table, int write, table 128 include/linux/platform_data/brcmfmac.h struct brcmfmac_pd_cc_entry table[0]; table 105 include/linux/poll.h struct poll_table_page *table; table 377 include/linux/power_supply.h extern int power_supply_ocv2cap_simple(struct 
power_supply_battery_ocv_table *table, table 192 include/linux/printk.h devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, table 596 include/linux/pwm.h void pwm_add_table(struct pwm_lookup *table, size_t num); table 597 include/linux/pwm.h void pwm_remove_table(struct pwm_lookup *table, size_t num); table 599 include/linux/pwm.h static inline void pwm_add_table(struct pwm_lookup *table, size_t num) table 603 include/linux/pwm.h static inline void pwm_remove_table(struct pwm_lookup *table, size_t num) table 276 include/linux/qed/qed_eth_if.h int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table); table 1052 include/linux/regmap.h const struct regmap_access_table *table); table 123 include/linux/rio.h u16 table, u16 route_destid, u8 route_port); table 125 include/linux/rio.h u16 table, u16 route_destid, u8 *route_port); table 127 include/linux/rio.h u16 table); table 335 include/linux/scatterlist.h void sg_free_table_chained(struct sg_table *table, table 337 include/linux/scatterlist.h int sg_alloc_table_chained(struct sg_table *table, int nents, table 15 include/linux/sched/sysctl.h extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, table 45 include/linux/sched/sysctl.h int sched_proc_update_handler(struct ctl_table *table, int write, table 75 include/linux/sched/sysctl.h extern int sched_rr_handler(struct ctl_table *table, int write, table 79 include/linux/sched/sysctl.h extern int sched_rt_handler(struct ctl_table *table, int write, table 84 include/linux/sched/sysctl.h extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, table 89 include/linux/sched/sysctl.h extern int sysctl_numa_balancing(struct ctl_table *table, int write, table 93 include/linux/sched/sysctl.h extern int sysctl_schedstats(struct ctl_table *table, int write, table 99 include/linux/sched/sysctl.h extern int sched_energy_aware_handler(struct ctl_table *table, int write, table 212 include/linux/security.h extern int mmap_min_addr_handler(struct ctl_table *table, int write, table 174 include/linux/sfi.h typedef int (*sfi_table_handler) (struct sfi_table_header *table); table 169 include/linux/sh_clk.h struct clk_div4_table *table); table 171 include/linux/sh_clk.h struct clk_div4_table *table); table 173 include/linux/sh_clk.h struct clk_div4_table *table); table 164 include/linux/soc/qcom/llcc-qcom.h const struct llcc_slice_config *table, u32 sz); table 201 include/linux/soc/qcom/llcc-qcom.h const struct llcc_slice_config *table, u32 sz) table 27 include/linux/stackleak.h int stack_erasing_sysctl(struct ctl_table *table, int write, table 58 include/linux/sysctl.h extern int proc_douintvec_minmax(struct ctl_table *table, int write, table 69 include/linux/sysctl.h extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, table 73 include/linux/sysctl.h extern int proc_do_static_key(struct ctl_table *table, int write, table 178 include/linux/sysctl.h struct ctl_table *table, table 180 include/linux/sysctl.h int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); table 199 include/linux/sysctl.h const char *path, struct ctl_table *table); table 202 include/linux/sysctl.h const struct ctl_path *path, struct ctl_table *table); table 203 include/linux/sysctl.h struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table); table 204 include/linux/sysctl.h struct ctl_table_header *register_sysctl_table(struct ctl_table * table); table 206 include/linux/sysctl.h struct ctl_table 
*table); table 208 include/linux/sysctl.h void unregister_sysctl_table(struct ctl_table_header * table); table 215 include/linux/sysctl.h static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table) table 221 include/linux/sysctl.h const struct ctl_path *path, struct ctl_table *table) table 226 include/linux/sysctl.h static inline struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table) table 231 include/linux/sysctl.h static inline void unregister_sysctl_table(struct ctl_table_header * table) table 243 include/linux/sysctl.h int sysctl_max_threads(struct ctl_table *table, int write, table 203 include/linux/timer.h int timer_migration_handler(struct ctl_table *table, int write, table 774 include/linux/usb/gadget.h int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf); table 19 include/linux/vmstat.h extern int sysctl_vm_numa_stat_handler(struct ctl_table *table, table 65 include/linux/vt_kern.h int con_set_trans_old(unsigned char __user * table); table 66 include/linux/vt_kern.h int con_get_trans_old(unsigned char __user * table); table 67 include/linux/vt_kern.h int con_set_trans_new(unsigned short __user * table); table 68 include/linux/vt_kern.h int con_get_trans_new(unsigned short __user * table); table 79 include/linux/vt_kern.h static inline int con_set_trans_old(unsigned char __user *table) table 83 include/linux/vt_kern.h static inline int con_get_trans_old(unsigned char __user *table) table 87 include/linux/vt_kern.h static inline int con_set_trans_new(unsigned short __user *table) table 91 include/linux/vt_kern.h static inline int con_get_trans_new(unsigned short __user *table) table 365 include/linux/writeback.h extern int dirty_background_ratio_handler(struct ctl_table *table, int write, table 368 include/linux/writeback.h extern int dirty_background_bytes_handler(struct ctl_table *table, int write, table 371 include/linux/writeback.h extern int dirty_ratio_handler(struct ctl_table *table, int write, table 374 include/linux/writeback.h extern int dirty_bytes_handler(struct ctl_table *table, int write, table 377 include/linux/writeback.h int dirtytime_interval_handler(struct ctl_table *table, int write, table 88 include/media/davinci/isif.h struct isif_vdfc_entry table[ISIF_VDFC_TABLE_SIZE]; table 243 include/media/davinci/isif.h __u16 table[ISIF_LINEAR_TAB_SIZE]; table 121 include/net/6lowpan.h struct lowpan_iphc_ctx table[LOWPAN_IPHC_CTX_TABLE_SIZE]; table 73 include/net/cfg802154.h struct ieee802154_llsec_table **table); table 26 include/net/fib_rules.h u32 table; table 53 include/net/fib_rules.h u32 table; table 140 include/net/fib_rules.h return rule->l3mdev ? 
arg->table : rule->table; table 146 include/net/fib_rules.h return rule->table; table 154 include/net/fib_rules.h return frh->table; table 195 include/net/fib_rules.h int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table, table 405 include/net/ip6_fib.h int fib6_table_lookup(struct net *net, struct fib6_table *table, table 113 include/net/ip6_route.h struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, table 174 include/net/ip_fib.h struct fib_table *table; table 261 include/net/ip_fib.h int fib_table_dump(struct fib_table *table, struct sk_buff *skb, table 263 include/net/ip_fib.h int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all); table 265 include/net/ip_fib.h void fib_table_flush_external(struct fib_table *table); table 447 include/net/ip_fib.h int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope, table 1160 include/net/ip_vs.h void ip_vs_init_hash_table(struct list_head *table, int rows); table 1366 include/net/ip_vs.h int *ip_vs_create_timeout_table(int *table, int size); table 36 include/net/ipv6_stubs.h int (*fib6_table_lookup)(struct net *net, struct fib6_table *table, table 412 include/net/net_namespace.h struct ctl_table *table); table 417 include/net/net_namespace.h const char *path, struct ctl_table *table) table 171 include/net/netfilter/nf_tables.h struct nft_table *table; table 425 include/net/netfilter/nf_tables.h struct nft_table *table; table 467 include/net/netfilter/nf_tables.h const struct nft_table *table, table 913 include/net/netfilter/nf_tables.h struct nft_table *table; table 1053 include/net/netfilter/nf_tables.h const struct nft_table *table; table 1089 include/net/netfilter/nf_tables.h const struct nft_table *table, table 1093 include/net/netfilter/nf_tables.h void nft_obj_notify(struct net *net, const struct nft_table *table, table 1170 include/net/netfilter/nf_tables.h struct nft_table *table; table 1183 include/net/netfilter/nf_tables.h struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table, table 15 include/net/netns/xfrm.h struct hlist_head __rcu *table; table 79 include/net/udp.h static inline struct udp_hslot *udp_hashslot(struct udp_table *table, table 82 include/net/udp.h return &table->hash[udp_hashfn(net, num, table->mask)]; table 88 include/net/udp.h static inline struct udp_hslot *udp_hashslot2(struct udp_table *table, table 91 include/net/udp.h return &table->hash2[hash & table->mask]; table 97 include/rdma/rdmavt_mr.h struct rvt_mregion __rcu **table; table 36 include/scsi/scsi_cmnd.h struct sg_table table; table 180 include/scsi/scsi_cmnd.h return cmd->sdb.table.nents; table 185 include/scsi/scsi_cmnd.h return cmd->sdb.table.sgl; table 294 include/scsi/scsi_cmnd.h return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0; table 299 include/scsi/scsi_cmnd.h return cmd->prot_sdb ? 
cmd->prot_sdb->table.sgl : NULL; table 85 include/sound/memalloc.h struct snd_sg_page *table; /* address table */ table 97 include/sound/memalloc.h dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; table 109 include/sound/memalloc.h return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE; table 105 include/sound/soundfont.h struct snd_sf_zone **table, int max_layers); table 16 include/trace/events/fib6.h struct fib6_table *table, const struct flowi6 *flp), table 18 include/trace/events/fib6.h TP_ARGS(net, res, table, flp), table 41 include/trace/events/fib6.h __entry->tb_id = table->tb6_id; table 25 include/uapi/linux/fib_rules.h __u8 table; table 76 include/uapi/linux/map_to_7segment.h unsigned char table[128]; table 81 include/uapi/linux/map_to_7segment.h return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL; table 85 include/uapi/linux/map_to_7segment.h struct seg7_conversion_map _name = { .table = { _map } } table 41 include/uapi/linux/netfilter_bridge/ebt_among.h int table[257]; table 505 include/uapi/linux/omap3isp.h __u32 table[4][OMAP3ISP_PREV_CFA_BLK_SIZE]; table 608 include/uapi/linux/omap3isp.h __u32 table[OMAP3ISP_PREV_NF_TBL_SIZE]; table 628 include/uapi/linux/omap3isp.h __u32 table[OMAP3ISP_PREV_YENH_TBL_SIZE]; table 17 ipc/ipc_sysctl.c static void *get_ipc(struct ctl_table *table) table 19 ipc/ipc_sysctl.c char *which = table->data; table 26 ipc/ipc_sysctl.c static int proc_ipc_dointvec(struct ctl_table *table, int write, table 31 ipc/ipc_sysctl.c memcpy(&ipc_table, table, sizeof(ipc_table)); table 32 ipc/ipc_sysctl.c ipc_table.data = get_ipc(table); table 37 ipc/ipc_sysctl.c static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write, table 42 ipc/ipc_sysctl.c memcpy(&ipc_table, table, sizeof(ipc_table)); table 43 ipc/ipc_sysctl.c ipc_table.data = get_ipc(table); table 48 ipc/ipc_sysctl.c static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write, table 52 ipc/ipc_sysctl.c int err = proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos); table 61 ipc/ipc_sysctl.c static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write, table 65 ipc/ipc_sysctl.c memcpy(&ipc_table, table, sizeof(ipc_table)); table 66 ipc/ipc_sysctl.c ipc_table.data = get_ipc(table); table 72 ipc/ipc_sysctl.c static int proc_ipc_auto_msgmni(struct ctl_table *table, int write, table 78 ipc/ipc_sysctl.c memcpy(&ipc_table, table, sizeof(ipc_table)); table 87 ipc/ipc_sysctl.c static int proc_ipc_sem_dointvec(struct ctl_table *table, int write, table 94 ipc/ipc_sysctl.c ret = proc_ipc_dointvec(table, write, buffer, lenp, ppos); table 13 ipc/mq_sysctl.c static void *get_mq(struct ctl_table *table) table 15 ipc/mq_sysctl.c char *which = table->data; table 21 ipc/mq_sysctl.c static int proc_mq_dointvec(struct ctl_table *table, int write, table 25 ipc/mq_sysctl.c memcpy(&mq_table, table, sizeof(mq_table)); table 26 ipc/mq_sysctl.c mq_table.data = get_mq(table); table 31 ipc/mq_sysctl.c static int proc_mq_dointvec_minmax(struct ctl_table *table, int write, table 35 ipc/mq_sysctl.c memcpy(&mq_table, table, sizeof(mq_table)); table 36 ipc/mq_sysctl.c mq_table.data = get_mq(table); table 885 kernel/bpf/cgroup.c struct ctl_table *table, int write, table 892 kernel/bpf/cgroup.c .table = table, table 911 kernel/bpf/cgroup.c if (table->proc_handler(table, 0, (void __user *)ctx.cur_val, table 1192 kernel/bpf/cgroup.c ret = strscpy(buf, ctx->table->procname, buf_len); table 238 kernel/events/callchain.c int perf_event_max_stack_handler(struct 
ctl_table *table, int write, table 241 kernel/events/callchain.c int *value = table->data; table 243 kernel/events/callchain.c struct ctl_table new_table = *table; table 441 kernel/events/core.c int perf_proc_update_handler(struct ctl_table *table, int write, table 453 kernel/events/core.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 466 kernel/events/core.c int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, table 470 kernel/events/core.c int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 2954 kernel/fork.c int sysctl_max_threads(struct ctl_table *table, int write, table 2963 kernel/fork.c t = *table; table 221 kernel/hung_task.c int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, table 227 kernel/hung_task.c ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 894 kernel/kprobes.c int proc_kprobes_optimization_handler(struct ctl_table *table, int write, table 902 kernel/kprobes.c ret = proc_dointvec_minmax(table, write, buffer, length, ppos); table 272 kernel/latencytop.c int sysctl_latencytop(struct ctl_table *table, int write, table 277 kernel/latencytop.c err = proc_dointvec(table, write, buffer, lenp, ppos); table 267 kernel/pid_namespace.c static int pid_ns_ctl_handler(struct ctl_table *table, int write, table 271 kernel/pid_namespace.c struct ctl_table tmp = *table; table 67 kernel/power/energy_model.c em_debug_create_cs(&pd->table[i], d); table 87 kernel/power/energy_model.c struct em_cap_state *table; table 98 kernel/power/energy_model.c table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL); table 99 kernel/power/energy_model.c if (!table) table 133 kernel/power/energy_model.c table[i].power = power; table 134 kernel/power/energy_model.c table[i].frequency = prev_freq = freq; table 150 kernel/power/energy_model.c fmax = (u64) table[nr_states - 1].frequency; table 152 kernel/power/energy_model.c table[i].cost = div64_u64(fmax * table[i].power, table 153 kernel/power/energy_model.c table[i].frequency); table 156 kernel/power/energy_model.c pd->table = table; table 165 kernel/power/energy_model.c kfree(table); table 175 kernel/printk/printk.c int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, table 190 kernel/printk/printk.c err = proc_dostring(table, write, buffer, lenp, ppos); table 1113 kernel/sched/core.c int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, table 1125 kernel/sched/core.c result = proc_dointvec(table, write, buffer, lenp, ppos); table 2733 kernel/sched/core.c int sysctl_numa_balancing(struct ctl_table *table, int write, table 2743 kernel/sched/core.c t = *table; table 2808 kernel/sched/core.c int sysctl_schedstats(struct ctl_table *table, int write, table 2818 kernel/sched/core.c t = *table; table 251 kernel/sched/debug.c struct ctl_table *table = sd_alloc_ctl_entry(9); table 253 kernel/sched/debug.c if (table == NULL) table 256 kernel/sched/debug.c set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax); table 257 kernel/sched/debug.c set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax); table 258 kernel/sched/debug.c set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax); table 259 kernel/sched/debug.c set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax); table 260 kernel/sched/debug.c set_table_entry(&table[4], "cache_nice_tries", 
&sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax); table 261 kernel/sched/debug.c set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax); table 262 kernel/sched/debug.c set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax); table 263 kernel/sched/debug.c set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring); table 266 kernel/sched/debug.c return table; table 271 kernel/sched/debug.c struct ctl_table *entry, *table; table 278 kernel/sched/debug.c entry = table = sd_alloc_ctl_entry(domain_num + 1); table 279 kernel/sched/debug.c if (table == NULL) table 291 kernel/sched/debug.c return table; table 635 kernel/sched/fair.c int sched_proc_update_handler(struct ctl_table *table, int write, table 639 kernel/sched/fair.c int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 2649 kernel/sched/rt.c int sched_rt_handler(struct ctl_table *table, int write, table 2661 kernel/sched/rt.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 2689 kernel/sched/rt.c int sched_rr_handler(struct ctl_table *table, int write, table 2697 kernel/sched/rt.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 211 kernel/sched/topology.c int sched_energy_aware_handler(struct ctl_table *table, int write, table 219 kernel/sched/topology.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 1683 kernel/seccomp.c struct ctl_table table; table 1691 kernel/seccomp.c table = *ro_table; table 1692 kernel/seccomp.c table.data = names; table 1693 kernel/seccomp.c table.maxlen = sizeof(names); table 1694 kernel/seccomp.c return proc_dostring(&table, 0, buffer, lenp, ppos); table 1701 kernel/seccomp.c struct ctl_table table; table 1709 kernel/seccomp.c table = *ro_table; table 1710 kernel/seccomp.c table.data = names; table 1711 kernel/seccomp.c table.maxlen = sizeof(names); table 1712 kernel/seccomp.c ret = proc_dostring(&table, 1, buffer, lenp, ppos); table 1716 kernel/seccomp.c if (!seccomp_actions_logged_from_names(actions_logged, table.data)) table 22 kernel/stackleak.c int stack_erasing_sysctl(struct ctl_table *table, int write, table 29 kernel/stackleak.c table->data = &state; table 30 kernel/stackleak.c table->maxlen = sizeof(int); table 31 kernel/stackleak.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 211 kernel/sysctl.c static int proc_do_cad_pid(struct ctl_table *table, int write, table 213 kernel/sysctl.c static int proc_taint(struct ctl_table *table, int write, table 218 kernel/sysctl.c static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, table 222 kernel/sysctl.c static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, table 225 kernel/sysctl.c static int proc_dostring_coredump(struct ctl_table *table, int write, table 228 kernel/sysctl.c static int proc_dopipe_max_size(struct ctl_table *table, int write, table 235 kernel/sysctl.c static int sysrq_sysctl_handler(struct ctl_table *table, int write, table 241 kernel/sysctl.c error = proc_dointvec(table, write, buffer, lenp, ppos); table 2067 kernel/sysctl.c static void warn_sysctl_write(struct ctl_table *table) table 2072 kernel/sysctl.c current->comm, table->procname); table 2085 kernel/sysctl.c struct ctl_table *table) table 2094 kernel/sysctl.c warn_sysctl_write(table); table 2118 kernel/sysctl.c int proc_dostring(struct ctl_table *table, int write, table 2122 kernel/sysctl.c 
proc_first_pos_non_zero_ignore(ppos, table); table 2124 kernel/sysctl.c return _proc_do_string((char *)(table->data), table->maxlen, write, table 2333 kernel/sysctl.c static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, table 2344 kernel/sysctl.c if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) { table 2350 kernel/sysctl.c vleft = table->maxlen / sizeof(*i); table 2357 kernel/sysctl.c if (proc_first_pos_non_zero_ignore(ppos, table)) table 2415 kernel/sysctl.c static int do_proc_dointvec(struct ctl_table *table, int write, table 2421 kernel/sysctl.c return __do_proc_dointvec(table->data, table, write, table 2426 kernel/sysctl.c struct ctl_table *table, table 2442 kernel/sysctl.c if (proc_first_pos_non_zero_ignore(ppos, table)) table 2518 kernel/sysctl.c static int __do_proc_douintvec(void *tbl_data, struct ctl_table *table, table 2528 kernel/sysctl.c if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) { table 2534 kernel/sysctl.c vleft = table->maxlen / sizeof(*i); table 2549 kernel/sysctl.c return do_proc_douintvec_w(i, table, buffer, lenp, ppos, table 2554 kernel/sysctl.c static int do_proc_douintvec(struct ctl_table *table, int write, table 2561 kernel/sysctl.c return __do_proc_douintvec(table->data, table, write, table 2578 kernel/sysctl.c int proc_dointvec(struct ctl_table *table, int write, table 2581 kernel/sysctl.c return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL); table 2597 kernel/sysctl.c int proc_douintvec(struct ctl_table *table, int write, table 2600 kernel/sysctl.c return do_proc_douintvec(table, write, buffer, lenp, ppos, table 2608 kernel/sysctl.c static int proc_taint(struct ctl_table *table, int write, table 2618 kernel/sysctl.c t = *table; table 2640 kernel/sysctl.c static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, table 2646 kernel/sysctl.c return proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 2706 kernel/sysctl.c int proc_dointvec_minmax(struct ctl_table *table, int write, table 2710 kernel/sysctl.c .min = (int *) table->extra1, table 2711 kernel/sysctl.c .max = (int *) table->extra2, table 2713 kernel/sysctl.c return do_proc_dointvec(table, write, buffer, lenp, ppos, table 2775 kernel/sysctl.c int proc_douintvec_minmax(struct ctl_table *table, int write, table 2779 kernel/sysctl.c .min = (unsigned int *) table->extra1, table 2780 kernel/sysctl.c .max = (unsigned int *) table->extra2, table 2782 kernel/sysctl.c return do_proc_douintvec(table, write, buffer, lenp, ppos, table 2806 kernel/sysctl.c static int proc_dopipe_max_size(struct ctl_table *table, int write, table 2809 kernel/sysctl.c return do_proc_douintvec(table, write, buffer, lenp, ppos, table 2827 kernel/sysctl.c static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, table 2830 kernel/sysctl.c int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 2837 kernel/sysctl.c static int proc_dostring_coredump(struct ctl_table *table, int write, table 2840 kernel/sysctl.c int error = proc_dostring(table, write, buffer, lenp, ppos); table 2847 kernel/sysctl.c static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, table 2858 kernel/sysctl.c if (!data || !table->maxlen || !*lenp || (*ppos && !write)) { table 2864 kernel/sysctl.c min = (unsigned long *) table->extra1; table 2865 kernel/sysctl.c max = (unsigned long *) table->extra2; table 2866 kernel/sysctl.c vleft = table->maxlen / sizeof(unsigned long); table 2870 kernel/sysctl.c if 
(proc_first_pos_non_zero_ignore(ppos, table)) table 2931 kernel/sysctl.c static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, table 2937 kernel/sysctl.c return __do_proc_doulongvec_minmax(table->data, table, write, table 2957 kernel/sysctl.c int proc_doulongvec_minmax(struct ctl_table *table, int write, table 2960 kernel/sysctl.c return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l); table 2980 kernel/sysctl.c int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, table 2984 kernel/sysctl.c return do_proc_doulongvec_minmax(table, write, buffer, table 3075 kernel/sysctl.c int proc_dointvec_jiffies(struct ctl_table *table, int write, table 3078 kernel/sysctl.c return do_proc_dointvec(table,write,buffer,lenp,ppos, table 3097 kernel/sysctl.c int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, table 3100 kernel/sysctl.c return do_proc_dointvec(table,write,buffer,lenp,ppos, table 3120 kernel/sysctl.c int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, table 3123 kernel/sysctl.c return do_proc_dointvec(table, write, buffer, lenp, ppos, table 3127 kernel/sysctl.c static int proc_do_cad_pid(struct ctl_table *table, int write, table 3136 kernel/sysctl.c r = __do_proc_dointvec(&tmp, table, write, buffer, table 3166 kernel/sysctl.c int proc_do_large_bitmap(struct ctl_table *table, int write, table 3172 kernel/sysctl.c unsigned long bitmap_len = table->maxlen; table 3173 kernel/sysctl.c unsigned long *bitmap = *(unsigned long **) table->data; table 3316 kernel/sysctl.c int proc_dostring(struct ctl_table *table, int write, table 3322 kernel/sysctl.c int proc_dointvec(struct ctl_table *table, int write, table 3328 kernel/sysctl.c int proc_douintvec(struct ctl_table *table, int write, table 3334 kernel/sysctl.c int proc_dointvec_minmax(struct ctl_table *table, int write, table 3340 kernel/sysctl.c int proc_douintvec_minmax(struct ctl_table *table, int write, table 3346 kernel/sysctl.c int proc_dointvec_jiffies(struct ctl_table *table, int write, table 3352 kernel/sysctl.c int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, table 3358 kernel/sysctl.c int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, table 3364 kernel/sysctl.c int proc_doulongvec_minmax(struct ctl_table *table, int write, table 3370 kernel/sysctl.c int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, table 3377 kernel/sysctl.c int proc_do_large_bitmap(struct ctl_table *table, int write, table 3386 kernel/sysctl.c int proc_do_static_key(struct ctl_table *table, int write, table 3390 kernel/sysctl.c struct static_key *key = (struct static_key *)table->data; table 3396 kernel/sysctl.c .mode = table->mode, table 1207 kernel/sysctl_binary.c const struct bin_table *table = &bin_root_table[0]; table 1223 kernel/sysctl_binary.c for ( ; table->convert; table++) { table 1230 kernel/sysctl_binary.c if (!table->ctl_name) { table 1242 kernel/sysctl_binary.c } else if (ctl_name == table->ctl_name) { table 1243 kernel/sysctl_binary.c len = strlen(table->procname); table 1244 kernel/sysctl_binary.c memcpy(path, table->procname, len); table 1248 kernel/sysctl_binary.c if (table->child) { table 1250 kernel/sysctl_binary.c table = table->child; table 1254 kernel/sysctl_binary.c return table; table 1267 kernel/sysctl_binary.c const struct bin_table *table = get_sysctl(name, nlen, tmp); table 1269 kernel/sysctl_binary.c *tablep = table; table 1270 kernel/sysctl_binary.c if (IS_ERR(table)) { table 1272 
kernel/sysctl_binary.c result = ERR_CAST(table); table 1281 kernel/sysctl_binary.c const struct bin_table *table = NULL; table 1288 kernel/sysctl_binary.c pathname = sysctl_getname(name, nlen, &table); table 1311 kernel/sysctl_binary.c result = table->convert(file, oldval, oldlen, newval, newlen); table 251 kernel/time/timer.c int timer_migration_handler(struct ctl_table *table, int write, table 258 kernel/time/timer.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 6766 kernel/trace/ftrace.c ftrace_enable_sysctl(struct ctl_table *table, int write, table 6777 kernel/trace/ftrace.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 2556 kernel/trace/trace.c int tracepoint_printk_sysctl(struct ctl_table *table, int write, table 2566 kernel/trace/trace.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 518 kernel/trace/trace_stack.c stack_trace_sysctl(struct ctl_table *table, int write, table 528 kernel/trace/trace_stack.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 35 kernel/ucount.c struct ctl_table *table) table 43 kernel/ucount.c mode = (table->mode & S_IRWXU) >> 6; table 46 kernel/ucount.c mode = table->mode & S_IROTH; table 643 kernel/umh.c static int proc_cap_handler(struct ctl_table *table, int write, table 661 kernel/umh.c if (table->data == CAP_BSET) table 663 kernel/umh.c else if (table->data == CAP_PI) table 670 kernel/umh.c t = *table; table 693 kernel/umh.c if (table->data == CAP_BSET) table 695 kernel/umh.c if (table->data == CAP_PI) table 17 kernel/utsname_sysctl.c static void *get_uts(struct ctl_table *table) table 19 kernel/utsname_sysctl.c char *which = table->data; table 32 kernel/utsname_sysctl.c static int proc_do_uts_string(struct ctl_table *table, int write, table 39 kernel/utsname_sysctl.c memcpy(&uts_table, table, sizeof(uts_table)); table 49 kernel/utsname_sysctl.c memcpy(tmp_data, get_uts(table), sizeof(tmp_data)); table 61 kernel/utsname_sysctl.c memcpy(get_uts(table), tmp_data, sizeof(tmp_data)); table 63 kernel/utsname_sysctl.c proc_sys_poll_notify(table->poll); table 132 kernel/utsname_sysctl.c struct ctl_table *table = &uts_kern_table[proc]; table 134 kernel/utsname_sysctl.c proc_sys_poll_notify(table->poll); table 682 kernel/watchdog.c static int proc_watchdog_common(int which, struct ctl_table *table, int write, table 685 kernel/watchdog.c int err, old, *param = table->data; table 695 kernel/watchdog.c err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 698 kernel/watchdog.c err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 709 kernel/watchdog.c int proc_watchdog(struct ctl_table *table, int write, table 713 kernel/watchdog.c table, write, buffer, lenp, ppos); table 719 kernel/watchdog.c int proc_nmi_watchdog(struct ctl_table *table, int write, table 725 kernel/watchdog.c table, write, buffer, lenp, ppos); table 731 kernel/watchdog.c int proc_soft_watchdog(struct ctl_table *table, int write, table 735 kernel/watchdog.c table, write, buffer, lenp, ppos); table 741 kernel/watchdog.c int proc_watchdog_thresh(struct ctl_table *table, int write, table 749 kernel/watchdog.c err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 764 kernel/watchdog.c int proc_watchdog_cpumask(struct ctl_table *table, int write, table 771 kernel/watchdog.c err = proc_do_large_bitmap(table, write, buffer, lenp, ppos); table 29 lib/crc8.c void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) table 35 lib/crc8.c table[0] = 0; table 40 lib/crc8.c table[i+j] = table[j] ^ 
t; table 51 lib/crc8.c void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) table 56 lib/crc8.c table[0] = 0; table 61 lib/crc8.c table[i+j] = table[j] ^ t; table 74 lib/crc8.c u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc) table 78 lib/crc8.c crc = table[(crc ^ *pdata++) & 0xff]; table 287 lib/devres.c void __iomem *table[PCIM_IOMAP_MAX]; table 297 lib/devres.c if (this->table[i]) table 298 lib/devres.c pci_iounmap(dev, this->table[i]); table 320 lib/devres.c return dr->table; table 326 lib/devres.c return dr->table; table 61 lib/dynamic_debug.c struct ddebug_table *table; table 730 lib/dynamic_debug.c iter->table = NULL; table 734 lib/dynamic_debug.c iter->table = list_entry(ddebug_tables.next, table 737 lib/dynamic_debug.c return &iter->table->ddebugs[iter->idx]; table 748 lib/dynamic_debug.c if (iter->table == NULL) table 750 lib/dynamic_debug.c if (++iter->idx == iter->table->num_ddebugs) { table 753 lib/dynamic_debug.c if (list_is_last(&iter->table->link, &ddebug_tables)) { table 754 lib/dynamic_debug.c iter->table = NULL; table 757 lib/dynamic_debug.c iter->table = list_entry(iter->table->link.next, table 760 lib/dynamic_debug.c return &iter->table->ddebugs[iter->idx]; table 831 lib/dynamic_debug.c iter->table->mod_name, dp->function, table 93 lib/gen_crc32table.c static void output_table(uint32_t (*table)[256], int rows, int len, char *trans) table 102 lib/gen_crc32table.c printf("%s(0x%8.8xL), ", trans, table[j][i]); table 104 lib/gen_crc32table.c printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]); table 105 lib/parser.c int match_token(char *s, const match_table_t table, substring_t args[]) table 109 lib/parser.c for (p = table; !match_one(s, p->pattern, args) ; p++) table 33 lib/rhashtable.c union nested_table __rcu *table; table 72 lib/rhashtable.c ntbl = rcu_dereference_raw(ntbl->table); table 1177 lib/rhashtable.c ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); table 1182 lib/rhashtable.c ntbl = rht_dereference_bucket_rcu(ntbl[index].table, table 1218 lib/rhashtable.c ntbl = nested_table_alloc(ht, &ntbl[index].table, table 1225 lib/rhashtable.c ntbl = nested_table_alloc(ht, &ntbl[index].table, table 192 lib/scatterlist.c void __sg_free_table(struct sg_table *table, unsigned int max_ents, table 198 lib/scatterlist.c if (unlikely(!table->sgl)) table 201 lib/scatterlist.c sgl = table->sgl; table 202 lib/scatterlist.c while (table->orig_nents) { table 203 lib/scatterlist.c unsigned int alloc_size = table->orig_nents; table 221 lib/scatterlist.c table->orig_nents -= sg_size; table 230 lib/scatterlist.c table->sgl = NULL; table 239 lib/scatterlist.c void sg_free_table(struct sg_table *table) table 241 lib/scatterlist.c __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree); table 266 lib/scatterlist.c int __sg_alloc_table(struct sg_table *table, unsigned int nents, table 276 lib/scatterlist.c memset(table, 0, sizeof(*table)); table 312 lib/scatterlist.c table->nents = ++table->orig_nents; table 318 lib/scatterlist.c table->nents = table->orig_nents += sg_size; table 327 lib/scatterlist.c table->sgl = sg; table 355 lib/scatterlist.c int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) table 359 lib/scatterlist.c ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, table 362 lib/scatterlist.c __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree); table 84 lib/sg_pool.c void sg_free_table_chained(struct sg_table *table, table 87 lib/sg_pool.c if (table->orig_nents <= nents_first_chunk) 
table 93 lib/sg_pool.c __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free); table 111 lib/sg_pool.c int sg_alloc_table_chained(struct sg_table *table, int nents, table 120 lib/sg_pool.c table->nents = table->orig_nents = nents; table 121 lib/sg_pool.c sg_init_table(table->sgl, nents); table 132 lib/sg_pool.c ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE, table 136 lib/sg_pool.c sg_free_table_chained(table, nents_first_chunk); table 24 lib/zlib_inflate/inftrees.c code **table, unsigned *bits, unsigned short *work) table 107 lib/zlib_inflate/inftrees.c *(*table)++ = this; /* make a table to force an error */ table 108 lib/zlib_inflate/inftrees.c *(*table)++ = this; table 190 lib/zlib_inflate/inftrees.c next = *table; /* current table to fill in */ table 271 lib/zlib_inflate/inftrees.c (*table)[low].op = (unsigned char)curr; table 272 lib/zlib_inflate/inftrees.c (*table)[low].bits = (unsigned char)root; table 273 lib/zlib_inflate/inftrees.c (*table)[low].val = (unsigned short)(next - *table); table 292 lib/zlib_inflate/inftrees.c next = *table; table 312 lib/zlib_inflate/inftrees.c *table += used; table 57 lib/zlib_inflate/inftrees.h unsigned codes, code **table, table 397 lib/zstd/compress.c static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue) table 401 lib/zstd/compress.c if (table[u] < reducerValue) table 402 lib/zstd/compress.c table[u] = 0; table 404 lib/zstd/compress.c table[u] -= reducerValue; table 355 lib/zstd/fse.h const void *table; /* precise table may vary, depending on U16 */ table 487 lib/zstd/fse.h DStatePtr->table = dt + 1; table 492 lib/zstd/fse.h FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; table 498 lib/zstd/fse.h FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; table 506 lib/zstd/fse.h FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; table 519 lib/zstd/fse.h FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state]; table 74 lib/zstd/huf_decompress.c static DTableDesc HUF_getDTableDesc(const HUF_DTable *table) table 77 lib/zstd/huf_decompress.c memcpy(&dtd, table, sizeof(dtd)); table 2457 mm/compaction.c int sysctl_compaction_handler(struct ctl_table *table, int write, table 3086 mm/hugetlb.c struct ctl_table *table, int write, table 3096 mm/hugetlb.c table->data = &tmp; table 3097 mm/hugetlb.c table->maxlen = sizeof(unsigned long); table 3098 mm/hugetlb.c ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); table 3109 mm/hugetlb.c int hugetlb_sysctl_handler(struct ctl_table *table, int write, table 3113 mm/hugetlb.c return hugetlb_sysctl_handler_common(false, table, write, table 3118 mm/hugetlb.c int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, table 3121 mm/hugetlb.c return hugetlb_sysctl_handler_common(true, table, write, table 3126 mm/hugetlb.c int hugetlb_overcommit_handler(struct ctl_table *table, int write, table 3142 mm/hugetlb.c table->data = &tmp; table 3143 mm/hugetlb.c table->maxlen = sizeof(unsigned long); table 3144 mm/hugetlb.c ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); table 120 mm/mmu_gather.c static void tlb_remove_table_one(void *table) table 130 mm/mmu_gather.c __tlb_remove_table(table); table 157 mm/mmu_gather.c void tlb_remove_table(struct mmu_gather *tlb, void *table) table 165 mm/mmu_gather.c tlb_remove_table_one(table); table 171 mm/mmu_gather.c (*batch)->tables[(*batch)->nr++] = 
table; table 514 mm/page-writeback.c int dirty_background_ratio_handler(struct ctl_table *table, int write, table 520 mm/page-writeback.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 526 mm/page-writeback.c int dirty_background_bytes_handler(struct ctl_table *table, int write, table 532 mm/page-writeback.c ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 538 mm/page-writeback.c int dirty_ratio_handler(struct ctl_table *table, int write, table 545 mm/page-writeback.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 553 mm/page-writeback.c int dirty_bytes_handler(struct ctl_table *table, int write, table 560 mm/page-writeback.c ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 1974 mm/page-writeback.c int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, table 1980 mm/page-writeback.c ret = proc_dointvec(table, write, buffer, length, ppos); table 5498 mm/page_alloc.c int numa_zonelist_order_handler(struct ctl_table *table, int write, table 5506 mm/page_alloc.c return proc_dostring(table, write, buffer, length, ppos); table 7882 mm/page_alloc.c int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, table 7887 mm/page_alloc.c rc = proc_dointvec_minmax(table, write, buffer, length, ppos); table 7898 mm/page_alloc.c int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write, table 7903 mm/page_alloc.c rc = proc_dointvec_minmax(table, write, buffer, length, ppos); table 7910 mm/page_alloc.c int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, table 7915 mm/page_alloc.c rc = proc_dointvec_minmax(table, write, buffer, length, ppos); table 7940 mm/page_alloc.c int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, table 7945 mm/page_alloc.c rc = proc_dointvec_minmax(table, write, buffer, length, ppos); table 7967 mm/page_alloc.c int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, table 7972 mm/page_alloc.c rc = proc_dointvec_minmax(table, write, buffer, length, ppos); table 7991 mm/page_alloc.c int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, table 7994 mm/page_alloc.c proc_dointvec_minmax(table, write, buffer, length, ppos); table 8004 mm/page_alloc.c int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, table 8014 mm/page_alloc.c ret = proc_dointvec_minmax(table, write, buffer, length, ppos); table 8086 mm/page_alloc.c void *table = NULL; table 8149 mm/page_alloc.c table = memblock_alloc(size, SMP_CACHE_BYTES); table 8151 mm/page_alloc.c table = memblock_alloc_raw(size, table 8154 mm/page_alloc.c table = __vmalloc(size, gfp_flags, PAGE_KERNEL); table 8162 mm/page_alloc.c table = alloc_pages_exact(size, gfp_flags); table 8163 mm/page_alloc.c kmemleak_alloc(table, size, 1, gfp_flags); table 8165 mm/page_alloc.c } while (!table && size > PAGE_SIZE && --log2qty); table 8167 mm/page_alloc.c if (!table) table 8179 mm/page_alloc.c return table; table 728 mm/util.c int overcommit_ratio_handler(struct ctl_table *table, int write, table 734 mm/util.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 740 mm/util.c int overcommit_kbytes_handler(struct ctl_table *table, int write, table 746 mm/util.c ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 78 mm/vmstat.c int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, table 86 mm/vmstat.c ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 
table 1758 mm/vmstat.c int vmstat_refresh(struct ctl_table *table, int write, table 37 net/6lowpan/core.c lowpan_dev(dev)->ctx.table[i].id = i; table 136 net/6lowpan/core.c &lowpan_dev(dev)->ctx.table[i].flags); table 70 net/6lowpan/debugfs.c container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]); table 86 net/6lowpan/debugfs.c container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]); table 101 net/6lowpan/debugfs.c container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]); table 131 net/6lowpan/debugfs.c container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]); table 179 net/6lowpan/debugfs.c debugfs_create_file("active", 0644, root, &ldev->ctx.table[id], table 182 net/6lowpan/debugfs.c debugfs_create_file("compression", 0644, root, &ldev->ctx.table[id], table 185 net/6lowpan/debugfs.c debugfs_create_file("prefix", 0644, root, &ldev->ctx.table[id], table 188 net/6lowpan/debugfs.c debugfs_create_file("prefix_len", 0644, root, &ldev->ctx.table[id], table 202 net/6lowpan/debugfs.c if (!lowpan_iphc_ctx_is_active(&t->table[i])) table 205 net/6lowpan/debugfs.c seq_printf(file, "%3d|%39pI6c/%-3d|%d\n", t->table[i].id, table 206 net/6lowpan/debugfs.c &t->table[i].pfx, t->table[i].plen, table 207 net/6lowpan/debugfs.c lowpan_iphc_ctx_is_compression(&t->table[i])); table 192 net/6lowpan/iphc.c struct lowpan_iphc_ctx *ret = &lowpan_dev(dev)->ctx.table[id]; table 204 net/6lowpan/iphc.c struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table; table 214 net/6lowpan/iphc.c if (!lowpan_iphc_ctx_is_active(&table[i]) || table 215 net/6lowpan/iphc.c !lowpan_iphc_ctx_is_compression(&table[i])) table 218 net/6lowpan/iphc.c ipv6_addr_prefix(&addr_pfx, addr, table[i].plen); table 223 net/6lowpan/iphc.c if (table[i].plen < 64) table 226 net/6lowpan/iphc.c addr_plen = table[i].plen; table 228 net/6lowpan/iphc.c if (ipv6_prefix_equal(&addr_pfx, &table[i].pfx, addr_plen)) { table 231 net/6lowpan/iphc.c ret = &table[i]; table 236 net/6lowpan/iphc.c if (table[i].plen > ret->plen) table 237 net/6lowpan/iphc.c ret = &table[i]; table 248 net/6lowpan/iphc.c struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table; table 260 net/6lowpan/iphc.c if (!lowpan_iphc_ctx_is_active(&table[i]) || table 261 net/6lowpan/iphc.c !lowpan_iphc_ctx_is_compression(&table[i])) table 265 net/6lowpan/iphc.c addr_mcast.s6_addr[3] = table[i].plen; table 267 net/6lowpan/iphc.c ipv6_addr_prefix(&network_pfx, &table[i].pfx, table 268 net/6lowpan/iphc.c table[i].plen); table 273 net/6lowpan/iphc.c ret = &table[i]; table 922 net/appletalk/aarp.c struct aarp_entry **table = iter->table; table 928 net/appletalk/aarp.c for (entry = table[ct]; entry; entry = entry->next) { table 930 net/appletalk/aarp.c iter->table = table; table 938 net/appletalk/aarp.c if (table == resolved) { table 940 net/appletalk/aarp.c table = unresolved; table 943 net/appletalk/aarp.c if (table == unresolved) { table 945 net/appletalk/aarp.c table = proxies; table 957 net/appletalk/aarp.c iter->table = resolved; table 1019 net/appletalk/aarp.c if (iter->table == unresolved) table 1026 net/appletalk/aarp.c (iter->table == resolved) ? "resolved" table 1027 net/appletalk/aarp.c : (iter->table == unresolved) ? "unresolved" table 1028 net/appletalk/aarp.c : (iter->table == proxies) ? 
"proxies" table 1087 net/atm/lec.c struct lec_arp_table *table; table 1093 net/atm/lec.c table = lec_arp_find(priv, dst_mac); table 1095 net/atm/lec.c if (table == NULL) table 1098 net/atm/lec.c *tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC); table 1102 net/atm/lec.c *sizeoftlvs = table->sizeoftlvs; table 152 net/ax25/sysctl_net_ax25.c struct ctl_table *table; table 154 net/ax25/sysctl_net_ax25.c table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL); table 155 net/ax25/sysctl_net_ax25.c if (!table) table 159 net/ax25/sysctl_net_ax25.c table[k].data = &ax25_dev->values[k]; table 162 net/ax25/sysctl_net_ax25.c ax25_dev->sysheader = register_net_sysctl(&init_net, path, table); table 164 net/ax25/sysctl_net_ax25.c kfree(table); table 173 net/ax25/sysctl_net_ax25.c struct ctl_table *table; table 177 net/ax25/sysctl_net_ax25.c table = header->ctl_table_arg; table 179 net/ax25/sysctl_net_ax25.c kfree(table); table 752 net/batman-adv/bat_iv_ogm.c head = &hash->table[i]; table 1835 net/batman-adv/bat_iv_ogm.c head = &hash->table[i]; table 2094 net/batman-adv/bat_iv_ogm.c head = &hash->table[bucket]; table 365 net/batman-adv/bat_v.c head = &hash->table[i]; table 589 net/batman-adv/bat_v.c head = &hash->table[bucket]; table 224 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[index]; table 267 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[index]; table 305 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; table 606 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; table 1227 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; table 1281 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; table 1351 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; table 1465 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; table 1675 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; table 2060 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; table 2179 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) { table 2293 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; table 2417 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) { table 170 net/batman-adv/distributed-arp-table.c head = &bat_priv->dat.hash->table[i]; table 338 net/batman-adv/distributed-arp-table.c head = &hash->table[index]; table 578 net/batman-adv/distributed-arp-table.c head = &hash->table[i]; table 872 net/batman-adv/distributed-arp-table.c head = &hash->table[i]; table 963 net/batman-adv/distributed-arp-table.c hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) { table 20 net/batman-adv/hash.c INIT_HLIST_HEAD(&hash->table[i]); table 34 net/batman-adv/hash.c kfree(hash->table); table 52 net/batman-adv/hash.c hash->table = kmalloc_array(size, sizeof(*hash->table), GFP_ATOMIC); table 53 net/batman-adv/hash.c if (!hash->table) table 66 net/batman-adv/hash.c kfree(hash->table); table 42 net/batman-adv/hash.h struct hlist_head *table; table 91 net/batman-adv/hash.h head = &hash->table[index]; table 140 net/batman-adv/hash.h head = &hash->table[index]; table 2123 net/batman-adv/multicast.c head = &hash->table[i]; table 2261 net/batman-adv/multicast.c hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) { table 405 net/batman-adv/network-coding.c head = &hash->table[i]; table 437 net/batman-adv/network-coding.c head = &hash->table[i]; table 550 
net/batman-adv/network-coding.c head = &hash->table[index]; table 683 net/batman-adv/network-coding.c head = &hash->table[i]; table 1284 net/batman-adv/network-coding.c hlist_for_each_entry_rcu(nc_path, &hash->table[idx], hash_entry) { table 1763 net/batman-adv/network-coding.c hlist_for_each_entry_rcu(nc_path, &hash->table[index], hash_entry) { table 1905 net/batman-adv/network-coding.c head = &hash->table[i]; table 69 net/batman-adv/originator.c head = &hash->table[index]; table 982 net/batman-adv/originator.c head = &hash->table[i]; table 1347 net/batman-adv/originator.c head = &hash->table[i]; table 141 net/batman-adv/translation-table.c head = &hash->table[index]; table 1101 net/batman-adv/translation-table.c head = &hash->table[i]; table 1227 net/batman-adv/translation-table.c hlist_for_each_entry(common, &hash->table[bucket], hash_entry) { table 1441 net/batman-adv/translation-table.c head = &hash->table[i]; table 1466 net/batman-adv/translation-table.c head = &hash->table[i]; table 1996 net/batman-adv/translation-table.c head = &hash->table[i]; table 2203 net/batman-adv/translation-table.c head = &hash->table[bucket]; table 2442 net/batman-adv/translation-table.c head = &hash->table[i]; table 2508 net/batman-adv/translation-table.c head = &hash->table[i]; table 2551 net/batman-adv/translation-table.c head = &hash->table[i]; table 2685 net/batman-adv/translation-table.c head = &hash->table[i]; table 2763 net/batman-adv/translation-table.c head = &hash->table[i]; table 3006 net/batman-adv/translation-table.c head = &hash->table[i]; table 3852 net/batman-adv/translation-table.c head = &hash->table[i]; table 3892 net/batman-adv/translation-table.c head = &hash->table[i]; table 1093 net/bridge/br_netfilter_hooks.c struct ctl_table *table = brnf_table; table 1097 net/bridge/br_netfilter_hooks.c table = kmemdup(table, sizeof(brnf_table), GFP_KERNEL); table 1098 net/bridge/br_netfilter_hooks.c if (!table) table 1103 net/bridge/br_netfilter_hooks.c table[0].data = &brnet->call_arptables; table 1104 net/bridge/br_netfilter_hooks.c table[1].data = &brnet->call_iptables; table 1105 net/bridge/br_netfilter_hooks.c table[2].data = &brnet->call_ip6tables; table 1106 net/bridge/br_netfilter_hooks.c table[3].data = &brnet->filter_vlan_tagged; table 1107 net/bridge/br_netfilter_hooks.c table[4].data = &brnet->filter_pppoe_tagged; table 1108 net/bridge/br_netfilter_hooks.c table[5].data = &brnet->pass_vlan_indev; table 1112 net/bridge/br_netfilter_hooks.c brnet->ctl_hdr = register_net_sysctl(net, "net/bridge", table); table 1115 net/bridge/br_netfilter_hooks.c kfree(table); table 1126 net/bridge/br_netfilter_hooks.c struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg; table 1130 net/bridge/br_netfilter_hooks.c kfree(table); table 33 net/bridge/netfilter/ebt_among.c start = wh->table[key]; table 34 net/bridge/netfilter/ebt_among.c limit = wh->table[key + 1]; table 59 net/bridge/netfilter/ebt_among.c if (wh->table[i] > wh->table[i + 1]) table 61 net/bridge/netfilter/ebt_among.c if (wh->table[i] < 0) table 63 net/bridge/netfilter/ebt_among.c if (wh->table[i] > wh->poolsize) table 66 net/bridge/netfilter/ebt_among.c if (wh->table[256] > wh->poolsize) table 82 net/bridge/netfilter/ebt_arpreply.c .table = "nat", table 70 net/bridge/netfilter/ebt_dnat.c if ((strcmp(par->table, "nat") != 0 || table 73 net/bridge/netfilter/ebt_dnat.c (strcmp(par->table, "broute") != 0 || table 46 net/bridge/netfilter/ebt_redirect.c if ((strcmp(par->table, "nat") != 0 || table 48 net/bridge/netfilter/ebt_redirect.c 
(strcmp(par->table, "broute") != 0 || table 67 net/bridge/netfilter/ebt_snat.c .table = "nat", table 48 net/bridge/netfilter/ebtable_broute.c .table = &initial_table, table 55 net/bridge/netfilter/ebtable_filter.c .table = &initial_table, table 55 net/bridge/netfilter/ebtable_nat.c .table = &initial_table, table 181 net/bridge/netfilter/ebtables.c struct ebt_table *table) table 198 net/bridge/netfilter/ebtables.c read_lock_bh(&table->lock); table 199 net/bridge/netfilter/ebtables.c private = table->private; table 220 net/bridge/netfilter/ebtables.c read_unlock_bh(&table->lock); table 241 net/bridge/netfilter/ebtables.c read_unlock_bh(&table->lock); table 245 net/bridge/netfilter/ebtables.c read_unlock_bh(&table->lock); table 269 net/bridge/netfilter/ebtables.c read_unlock_bh(&table->lock); table 281 net/bridge/netfilter/ebtables.c read_unlock_bh(&table->lock); table 299 net/bridge/netfilter/ebtables.c read_unlock_bh(&table->lock); table 302 net/bridge/netfilter/ebtables.c read_unlock_bh(&table->lock); table 696 net/bridge/netfilter/ebtables.c mtpar.table = tgpar.table = name; table 973 net/bridge/netfilter/ebtables.c struct ebt_table_info *table; table 1012 net/bridge/netfilter/ebtables.c table = t->private; table 1014 net/bridge/netfilter/ebtables.c if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) { table 1017 net/bridge/netfilter/ebtables.c } else if (table->nentries && !newinfo->nentries) table 1041 net/bridge/netfilter/ebtables.c EBT_ENTRY_ITERATE(table->entries, table->entries_size, table 1044 net/bridge/netfilter/ebtables.c vfree(table->entries); table 1045 net/bridge/netfilter/ebtables.c ebt_free_table_info(table); table 1046 net/bridge/netfilter/ebtables.c vfree(table); table 1128 net/bridge/netfilter/ebtables.c static void __ebt_unregister_table(struct net *net, struct ebt_table *table) table 1131 net/bridge/netfilter/ebtables.c list_del(&table->list); table 1133 net/bridge/netfilter/ebtables.c EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, table 1135 net/bridge/netfilter/ebtables.c if (table->private->nentries) table 1136 net/bridge/netfilter/ebtables.c module_put(table->me); table 1137 net/bridge/netfilter/ebtables.c vfree(table->private->entries); table 1138 net/bridge/netfilter/ebtables.c ebt_free_table_info(table->private); table 1139 net/bridge/netfilter/ebtables.c vfree(table->private); table 1140 net/bridge/netfilter/ebtables.c kfree(table); table 1147 net/bridge/netfilter/ebtables.c struct ebt_table *t, *table; table 1152 net/bridge/netfilter/ebtables.c if (input_table == NULL || (repl = input_table->table) == NULL || table 1158 net/bridge/netfilter/ebtables.c table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL); table 1159 net/bridge/netfilter/ebtables.c if (!table) { table 1196 net/bridge/netfilter/ebtables.c if (table->check && table->check(newinfo, table->valid_hooks)) { table 1201 net/bridge/netfilter/ebtables.c table->private = newinfo; table 1202 net/bridge/netfilter/ebtables.c rwlock_init(&table->lock); table 1205 net/bridge/netfilter/ebtables.c if (strcmp(t->name, table->name) == 0) { table 1212 net/bridge/netfilter/ebtables.c if (newinfo->nentries && !try_module_get(table->me)) { table 1216 net/bridge/netfilter/ebtables.c list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]); table 1219 net/bridge/netfilter/ebtables.c WRITE_ONCE(*res, table); table 1220 net/bridge/netfilter/ebtables.c ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); table 1222 net/bridge/netfilter/ebtables.c 
__ebt_unregister_table(net, table); table 1235 net/bridge/netfilter/ebtables.c kfree(table); table 1240 net/bridge/netfilter/ebtables.c void ebt_unregister_table(struct net *net, struct ebt_table *table, table 1243 net/bridge/netfilter/ebtables.c nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); table 1244 net/bridge/netfilter/ebtables.c __ebt_unregister_table(net, table); table 1429 net/bridge/netfilter/ebtables.c entries_size = t->table->entries_size; table 1430 net/bridge/netfilter/ebtables.c nentries = t->table->nentries; table 1431 net/bridge/netfilter/ebtables.c entries = t->table->entries; table 1432 net/bridge/netfilter/ebtables.c oldcounters = t->table->counters; table 1512 net/bridge/netfilter/ebtables.c tmp.nentries = t->table->nentries; table 1513 net/bridge/netfilter/ebtables.c tmp.entries_size = t->table->entries_size; table 1514 net/bridge/netfilter/ebtables.c tmp.valid_hooks = t->table->valid_hooks; table 1817 net/bridge/netfilter/ebtables.c tinfo.entries_size = t->table->entries_size; table 1818 net/bridge/netfilter/ebtables.c tinfo.nentries = t->table->nentries; table 1819 net/bridge/netfilter/ebtables.c tinfo.entries = t->table->entries; table 1820 net/bridge/netfilter/ebtables.c oldcounters = t->table->counters; table 2384 net/bridge/netfilter/ebtables.c tmp.nentries = t->table->nentries; table 2385 net/bridge/netfilter/ebtables.c tmp.entries_size = t->table->entries_size; table 2386 net/bridge/netfilter/ebtables.c tmp.valid_hooks = t->table->valid_hooks; table 1713 net/core/devlink.c static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table, table 1723 net/core/devlink.c if (table->table_ops->matches_dump(table->priv, skb)) table 1761 net/core/devlink.c static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table, table 1771 net/core/devlink.c if (table->table_ops->actions_dump(table->priv, skb)) table 1783 net/core/devlink.c struct devlink_dpipe_table *table) table 1788 net/core/devlink.c table_size = table->table_ops->size_get(table->priv); table 1793 net/core/devlink.c if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) || table 1798 net/core/devlink.c table->counters_enabled)) table 1801 net/core/devlink.c if (table->resource_valid) { table 1803 net/core/devlink.c table->resource_id, DEVLINK_ATTR_PAD) || table 1805 net/core/devlink.c table->resource_units, DEVLINK_ATTR_PAD)) table 1808 net/core/devlink.c if (devlink_dpipe_matches_put(table, skb)) table 1811 net/core/devlink.c if (devlink_dpipe_actions_put(table, skb)) table 1844 net/core/devlink.c struct devlink_dpipe_table *table; table 1853 net/core/devlink.c table = list_first_entry(dpipe_tables, table 1875 net/core/devlink.c list_for_each_entry_from(table, dpipe_tables, list) { table 1877 net/core/devlink.c err = devlink_dpipe_table_put(skb, table); table 1885 net/core/devlink.c if (!strcmp(table->name, table_name)) { table 1886 net/core/devlink.c err = devlink_dpipe_table_put(skb, table); table 2082 net/core/devlink.c struct devlink_dpipe_table *table; table 2084 net/core/devlink.c list_for_each_entry_rcu(table, dpipe_tables, list) { table 2085 net/core/devlink.c if (!strcmp(table->name, table_name)) table 2086 net/core/devlink.c return table; table 2163 net/core/devlink.c struct devlink_dpipe_table *table) table 2173 net/core/devlink.c err = table->table_ops->entries_dump(table->priv, table 2174 net/core/devlink.c table->counters_enabled, table 2195 net/core/devlink.c struct devlink_dpipe_table *table; table 2202 net/core/devlink.c table = 
devlink_dpipe_table_find(&devlink->dpipe_table_list, table 2204 net/core/devlink.c if (!table) table 2207 net/core/devlink.c if (!table->table_ops->entries_dump) table 2211 net/core/devlink.c 0, table); table 2356 net/core/devlink.c struct devlink_dpipe_table *table; table 2358 net/core/devlink.c table = devlink_dpipe_table_find(&devlink->dpipe_table_list, table 2360 net/core/devlink.c if (!table) table 2363 net/core/devlink.c if (table->counter_control_extern) table 2366 net/core/devlink.c if (!(table->counters_enabled ^ enable)) table 2369 net/core/devlink.c table->counters_enabled = enable; table 2370 net/core/devlink.c if (table->table_ops->counters_set_update) table 2371 net/core/devlink.c table->table_ops->counters_set_update(table->priv, enable); table 6706 net/core/devlink.c struct devlink_dpipe_table *table; table 6710 net/core/devlink.c table = devlink_dpipe_table_find(&devlink->dpipe_table_list, table 6713 net/core/devlink.c if (table) table 6714 net/core/devlink.c enabled = table->counters_enabled; table 6734 net/core/devlink.c struct devlink_dpipe_table *table; table 6742 net/core/devlink.c table = kzalloc(sizeof(*table), GFP_KERNEL); table 6743 net/core/devlink.c if (!table) table 6746 net/core/devlink.c table->name = table_name; table 6747 net/core/devlink.c table->table_ops = table_ops; table 6748 net/core/devlink.c table->priv = priv; table 6749 net/core/devlink.c table->counter_control_extern = counter_control_extern; table 6752 net/core/devlink.c list_add_tail_rcu(&table->list, &devlink->dpipe_table_list); table 6767 net/core/devlink.c struct devlink_dpipe_table *table; table 6770 net/core/devlink.c table = devlink_dpipe_table_find(&devlink->dpipe_table_list, table 6772 net/core/devlink.c if (!table) table 6774 net/core/devlink.c list_del_rcu(&table->list); table 6776 net/core/devlink.c kfree_rcu(table, rcu); table 6923 net/core/devlink.c struct devlink_dpipe_table *table; table 6927 net/core/devlink.c table = devlink_dpipe_table_find(&devlink->dpipe_table_list, table 6929 net/core/devlink.c if (!table) { table 6933 net/core/devlink.c table->resource_id = resource_id; table 6934 net/core/devlink.c table->resource_units = resource_units; table 6935 net/core/devlink.c table->resource_valid = true; table 42 net/core/fib_rules.c u32 pref, u32 table, u32 flags) table 53 net/core/fib_rules.c r->table = table; table 399 net/core/fib_rules.c if (rule->table && r->table != rule->table) table 574 net/core/fib_rules.c nlrule->table = frh_get_table(frh, tb); table 602 net/core/fib_rules.c if (nlrule->l3mdev && nlrule->table) { table 665 net/core/fib_rules.c if (r->table != rule->table) table 970 net/core/fib_rules.c frh->table = rule->table < 256 ? 
rule->table : RT_TABLE_COMPAT; table 971 net/core/fib_rules.c if (nla_put_u32(skb, FRA_TABLE, rule->table)) table 1076 net/core/fib_rules.c if (frh->dst_len || frh->src_len || frh->tos || frh->table || table 783 net/core/net-sysfs.c struct rps_dev_flow_table *table = container_of(rcu, table 785 net/core/net-sysfs.c vfree(table); table 792 net/core/net-sysfs.c struct rps_dev_flow_table *table, *old_table; table 824 net/core/net-sysfs.c table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1)); table 825 net/core/net-sysfs.c if (!table) table 828 net/core/net-sysfs.c table->mask = mask; table 830 net/core/net-sysfs.c table->flows[count].cpu = RPS_NO_CPU; table 832 net/core/net-sysfs.c table = NULL; table 838 net/core/net-sysfs.c rcu_assign_pointer(queue->rps_flow_table, table); table 47 net/core/sysctl_net_core.c static int rps_sock_flow_sysctl(struct ctl_table *table, int write, table 55 net/core/sysctl_net_core.c .mode = table->mode table 117 net/core/sysctl_net_core.c static int flow_limit_cpu_sysctl(struct ctl_table *table, int write, table 196 net/core/sysctl_net_core.c static int flow_limit_table_len_sysctl(struct ctl_table *table, int write, table 205 net/core/sysctl_net_core.c ptr = table->data; table 207 net/core/sysctl_net_core.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 219 net/core/sysctl_net_core.c static int set_default_qdisc(struct ctl_table *table, int write, table 238 net/core/sysctl_net_core.c static int proc_do_dev_weight(struct ctl_table *table, int write, table 243 net/core/sysctl_net_core.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 253 net/core/sysctl_net_core.c static int proc_do_rss_key(struct ctl_table *table, int write, table 266 net/core/sysctl_net_core.c static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, table 270 net/core/sysctl_net_core.c int ret, jit_enable = *(int *)table->data; table 271 net/core/sysctl_net_core.c struct ctl_table tmp = *table; table 281 net/core/sysctl_net_core.c *(int *)table->data = jit_enable; table 293 net/core/sysctl_net_core.c proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, table 300 net/core/sysctl_net_core.c return proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 305 net/core/sysctl_net_core.c proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write, table 312 net/core/sysctl_net_core.c return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 978 net/dcb/dcbnl.c struct dcb_app *table = NULL; table 990 net/dcb/dcbnl.c table = kmalloc_array(app_count, sizeof(struct dcb_app), table 992 net/dcb/dcbnl.c if (!table) table 995 net/dcb/dcbnl.c err = ops->peer_getapptable(netdev, table); table 1018 net/dcb/dcbnl.c &table[i])) table 1026 net/dcb/dcbnl.c kfree(table); table 944 net/dccp/feat.c const struct ccid_dependency *table = dccp_feat_ccid_deps(id, is_local); table 945 net/dccp/feat.c int i, rc = (table == NULL); table 947 net/dccp/feat.c for (i = 0; rc == 0 && table[i].dependent_feat != DCCPF_RESERVED; i++) table 948 net/dccp/feat.c if (dccp_feat_type(table[i].dependent_feat) == FEAT_SP) table 949 net/dccp/feat.c rc = __feat_register_sp(fn, table[i].dependent_feat, table 950 net/dccp/feat.c table[i].is_local, table 951 net/dccp/feat.c table[i].is_mandatory, table 952 net/dccp/feat.c &table[i].val, 1); table 954 net/dccp/feat.c rc = __feat_register_nn(fn, table[i].dependent_feat, table 955 net/dccp/feat.c table[i].is_mandatory, table 956 net/dccp/feat.c table[i].val); table 247 net/decnet/dn_dev.c static int 
dn_forwarding_proc(struct ctl_table *table, int write, table 252 net/decnet/dn_dev.c struct net_device *dev = table->extra1; table 257 net/decnet/dn_dev.c if (table->extra1 == NULL) table 263 net/decnet/dn_dev.c err = proc_dointvec(table, write, buffer, lenp, ppos); table 497 net/decnet/dn_fib.c static inline u32 rtm_get_table(struct nlattr *attrs[], u8 table) table 500 net/decnet/dn_fib.c table = nla_get_u32(attrs[RTA_TABLE]); table 502 net/decnet/dn_fib.c return table; table 93 net/decnet/dn_rules.c tbl = dn_fib_get_table(rule->table, 0); table 135 net/decnet/dn_rules.c if (rule->table == RT_TABLE_UNSPEC) { table 137 net/decnet/dn_rules.c struct dn_fib_table *table; table 139 net/decnet/dn_rules.c table = dn_fib_empty_table(); table 140 net/decnet/dn_rules.c if (table == NULL) { table 145 net/decnet/dn_rules.c rule->table = table->n; table 187 net/decnet/dn_table.c static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) table 212 net/decnet/dn_table.c if (table->dh_zones[i]) table 217 net/decnet/dn_table.c dz->dz_next = table->dh_zone_list; table 218 net/decnet/dn_table.c table->dh_zone_list = dz; table 220 net/decnet/dn_table.c dz->dz_next = table->dh_zones[i]->dz_next; table 221 net/decnet/dn_table.c table->dh_zones[i]->dz_next = dz; table 223 net/decnet/dn_table.c table->dh_zones[z] = dz; table 467 net/decnet/dn_table.c struct dn_hash *table = (struct dn_hash *)tb->data; table 471 net/decnet/dn_table.c for(dz = table->dh_zone_list, m = 0; dz; dz = dz->dz_next, m++) { table 532 net/decnet/dn_table.c struct dn_hash *table = (struct dn_hash *)tb->data; table 544 net/decnet/dn_table.c dz = table->dh_zones[z]; table 545 net/decnet/dn_table.c if (!dz && !(dz = dn_new_zone(table, z))) table 669 net/decnet/dn_table.c struct dn_hash *table = (struct dn_hash*)tb->data; table 680 net/decnet/dn_table.c if ((dz = table->dh_zones[z]) == NULL) table 748 net/decnet/dn_table.c static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table) table 773 net/decnet/dn_table.c struct dn_hash *table = (struct dn_hash *)tb->data; table 778 net/decnet/dn_table.c for(dz = table->dh_zone_list; dz; dz = dz->dz_next) { table 782 net/decnet/dn_table.c tmp += dn_flush_list(&dz->dz_hash[i], dz->dz_order, table); table 136 net/decnet/sysctl_net_decnet.c static int dn_node_address_handler(struct ctl_table *table, int write, table 187 net/decnet/sysctl_net_decnet.c static int dn_def_dev_handler(struct ctl_table *table, int write, table 358 net/ieee802154/6lowpan/reassembly.c struct ctl_table *table; table 363 net/ieee802154/6lowpan/reassembly.c table = lowpan_frags_ns_ctl_table; table 365 net/ieee802154/6lowpan/reassembly.c table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table), table 367 net/ieee802154/6lowpan/reassembly.c if (table == NULL) table 372 net/ieee802154/6lowpan/reassembly.c table[0].procname = NULL; table 375 net/ieee802154/6lowpan/reassembly.c table[0].data = &ieee802154_lowpan->fqdir->high_thresh; table 376 net/ieee802154/6lowpan/reassembly.c table[0].extra1 = &ieee802154_lowpan->fqdir->low_thresh; table 377 net/ieee802154/6lowpan/reassembly.c table[1].data = &ieee802154_lowpan->fqdir->low_thresh; table 378 net/ieee802154/6lowpan/reassembly.c table[1].extra2 = &ieee802154_lowpan->fqdir->high_thresh; table 379 net/ieee802154/6lowpan/reassembly.c table[2].data = &ieee802154_lowpan->fqdir->timeout; table 381 net/ieee802154/6lowpan/reassembly.c hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table); table 390 net/ieee802154/6lowpan/reassembly.c kfree(table); table 
397 net/ieee802154/6lowpan/reassembly.c struct ctl_table *table; table 401 net/ieee802154/6lowpan/reassembly.c table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg; table 404 net/ieee802154/6lowpan/reassembly.c kfree(table); table 765 net/ieee802154/nl-mac.c struct ieee802154_llsec_table *table; table 795 net/ieee802154/nl-mac.c data.ops->llsec->get_table(data.dev, &data.table); table 953 net/ieee802154/nl-mac.c list_for_each_entry(pos, &data->table->keys, list) { table 1084 net/ieee802154/nl-mac.c list_for_each_entry(pos, &data->table->devices, list) { table 1188 net/ieee802154/nl-mac.c list_for_each_entry(dpos, &data->table->devices, list) { table 1320 net/ieee802154/nl-mac.c list_for_each_entry(pos, &data->table->security_levels, list) { table 1501 net/ieee802154/nl802154.c struct ieee802154_llsec_table *table; table 1515 net/ieee802154/nl802154.c rdev_get_llsec_table(rdev, wpan_dev, &table); table 1521 net/ieee802154/nl802154.c list_for_each_entry(key, &table->keys, list) { table 1667 net/ieee802154/nl802154.c struct ieee802154_llsec_table *table; table 1681 net/ieee802154/nl802154.c rdev_get_llsec_table(rdev, wpan_dev, &table); table 1687 net/ieee802154/nl802154.c list_for_each_entry(dev, &table->devices, list) { table 1836 net/ieee802154/nl802154.c struct ieee802154_llsec_table *table; table 1850 net/ieee802154/nl802154.c rdev_get_llsec_table(rdev, wpan_dev, &table); table 1857 net/ieee802154/nl802154.c list_for_each_entry(dpos, &table->devices, list) { table 1997 net/ieee802154/nl802154.c struct ieee802154_llsec_table *table; table 2011 net/ieee802154/nl802154.c rdev_get_llsec_table(rdev, wpan_dev, &table); table 2017 net/ieee802154/nl802154.c list_for_each_entry(sl, &table->security_levels, list) { table 217 net/ieee802154/rdev-ops.h struct ieee802154_llsec_table **table) table 219 net/ieee802154/rdev-ops.h rdev->ops->get_llsec_table(&rdev->wpan_phy, wpan_dev, table); table 222 net/ipv4/fib_frontend.c struct fib_table *table; table 231 net/ipv4/fib_frontend.c table = fib_get_table(net, tb_id); table 232 net/ipv4/fib_frontend.c if (table) { table 234 net/ipv4/fib_frontend.c if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) { table 61 net/ipv4/fib_rules.c if (rule->table != RT_TABLE_LOCAL && rule->table != RT_TABLE_MAIN && table 62 net/ipv4/fib_rules.c rule->table != RT_TABLE_DEFAULT) table 237 net/ipv4/fib_rules.c if (rule->table == RT_TABLE_UNSPEC && !rule->l3mdev) { table 239 net/ipv4/fib_rules.c struct fib_table *table; table 241 net/ipv4/fib_rules.c table = fib_empty_table(net); table 242 net/ipv4/fib_rules.c if (!table) { table 247 net/ipv4/fib_rules.c rule->table = table->tb_id; table 987 net/ipv4/fib_semantics.c u32 table, struct netlink_ext_ack *extack) table 990 net/ipv4/fib_semantics.c .fc_table = table, table 1054 net/ipv4/fib_semantics.c static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, table 1103 net/ipv4/fib_semantics.c if (table) table 1104 net/ipv4/fib_semantics.c tbl = fib_get_table(net, table); table 1183 net/ipv4/fib_semantics.c int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope, table 1189 net/ipv4/fib_semantics.c err = fib_check_nh_v4_gw(net, nh, table, scope, extack); table 1191 net/ipv4/fib_semantics.c err = fib_check_nh_v6_gw(net, nh, table, extack); table 1993 net/ipv4/fib_semantics.c struct fib_table *tb = res->table; table 2205 net/ipv4/fib_semantics.c res->table->tb_num_default > 1 && table 1498 net/ipv4/fib_trie.c res->table = tb; table 130 net/ipv4/inet_hashtables.c struct inet_hashinfo 
*table = sk->sk_prot->h.hashinfo; table 133 net/ipv4/inet_hashtables.c table->bhash_size); table 134 net/ipv4/inet_hashtables.c struct inet_bind_hashbucket *head = &table->bhash[bhash]; table 158 net/ipv4/inet_hashtables.c tb = inet_bind_bucket_create(table->bind_bucket_cachep, table 592 net/ipv4/ip_fragment.c struct ctl_table *table; table 595 net/ipv4/ip_fragment.c table = ip4_frags_ns_ctl_table; table 597 net/ipv4/ip_fragment.c table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); table 598 net/ipv4/ip_fragment.c if (!table) table 602 net/ipv4/ip_fragment.c table[0].data = &net->ipv4.fqdir->high_thresh; table 603 net/ipv4/ip_fragment.c table[0].extra1 = &net->ipv4.fqdir->low_thresh; table 604 net/ipv4/ip_fragment.c table[1].data = &net->ipv4.fqdir->low_thresh; table 605 net/ipv4/ip_fragment.c table[1].extra2 = &net->ipv4.fqdir->high_thresh; table 606 net/ipv4/ip_fragment.c table[2].data = &net->ipv4.fqdir->timeout; table 607 net/ipv4/ip_fragment.c table[3].data = &net->ipv4.fqdir->max_dist; table 609 net/ipv4/ip_fragment.c hdr = register_net_sysctl(net, "net/ipv4", table); table 618 net/ipv4/ip_fragment.c kfree(table); table 625 net/ipv4/ip_fragment.c struct ctl_table *table; table 627 net/ipv4/ip_fragment.c table = net->ipv4.frags_hdr->ctl_table_arg; table 629 net/ipv4/ip_fragment.c kfree(table); table 182 net/ipv4/ipmr.c arg->table = fib_rule_get_table(rule, arg); table 184 net/ipv4/ipmr.c mrt = ipmr_get_table(rule->fr_net, arg->table); table 293 net/ipv4/ipmr.c return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT; table 184 net/ipv4/netfilter/arp_tables.c struct xt_table *table) table 206 net/ipv4/netfilter/arp_tables.c private = READ_ONCE(table->private); /* Address dependency. */ table 392 net/ipv4/netfilter/arp_tables.c .table = name, table 648 net/ipv4/netfilter/arp_tables.c static struct xt_counters *alloc_counters(const struct xt_table *table) table 652 net/ipv4/netfilter/arp_tables.c const struct xt_table_info *private = table->private; table 670 net/ipv4/netfilter/arp_tables.c const struct xt_table *table, table 676 net/ipv4/netfilter/arp_tables.c struct xt_table_info *private = table->private; table 680 net/ipv4/netfilter/arp_tables.c counters = alloc_counters(table); table 1356 net/ipv4/netfilter/arp_tables.c struct xt_table *table, table 1360 net/ipv4/netfilter/arp_tables.c const struct xt_table_info *private = table->private; table 1367 net/ipv4/netfilter/arp_tables.c counters = alloc_counters(table); table 1518 net/ipv4/netfilter/arp_tables.c static void __arpt_unregister_table(struct net *net, struct xt_table *table) table 1522 net/ipv4/netfilter/arp_tables.c struct module *table_owner = table->me; table 1525 net/ipv4/netfilter/arp_tables.c private = xt_unregister_table(table); table 1537 net/ipv4/netfilter/arp_tables.c const struct xt_table *table, table 1559 net/ipv4/netfilter/arp_tables.c new_table = xt_register_table(net, table, &bootstrap, newinfo); table 1568 net/ipv4/netfilter/arp_tables.c ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); table 1581 net/ipv4/netfilter/arp_tables.c void arpt_unregister_table(struct net *net, struct xt_table *table, table 1584 net/ipv4/netfilter/arp_tables.c nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); table 1585 net/ipv4/netfilter/arp_tables.c __arpt_unregister_table(net, table); table 227 net/ipv4/netfilter/ip_tables.c struct xt_table *table) table 258 net/ipv4/netfilter/ip_tables.c WARN_ON(!(table->valid_hooks & (1 << hook))); table 261 
net/ipv4/netfilter/ip_tables.c private = READ_ONCE(table->private); /* Address dependency. */ table 308 net/ipv4/netfilter/ip_tables.c state->out, table->name, private, e); table 503 net/ipv4/netfilter/ip_tables.c .table = name, table 533 net/ipv4/netfilter/ip_tables.c mtpar.table = name; table 790 net/ipv4/netfilter/ip_tables.c static struct xt_counters *alloc_counters(const struct xt_table *table) table 794 net/ipv4/netfilter/ip_tables.c const struct xt_table_info *private = table->private; table 812 net/ipv4/netfilter/ip_tables.c const struct xt_table *table, table 818 net/ipv4/netfilter/ip_tables.c const struct xt_table_info *private = table->private; table 822 net/ipv4/netfilter/ip_tables.c counters = alloc_counters(table); table 1569 net/ipv4/netfilter/ip_tables.c compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, table 1573 net/ipv4/netfilter/ip_tables.c const struct xt_table_info *private = table->private; table 1580 net/ipv4/netfilter/ip_tables.c counters = alloc_counters(table); table 1737 net/ipv4/netfilter/ip_tables.c static void __ipt_unregister_table(struct net *net, struct xt_table *table) table 1741 net/ipv4/netfilter/ip_tables.c struct module *table_owner = table->me; table 1744 net/ipv4/netfilter/ip_tables.c private = xt_unregister_table(table); table 1755 net/ipv4/netfilter/ip_tables.c int ipt_register_table(struct net *net, const struct xt_table *table, table 1776 net/ipv4/netfilter/ip_tables.c new_table = xt_register_table(net, table, &bootstrap, newinfo); table 1787 net/ipv4/netfilter/ip_tables.c ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); table 1800 net/ipv4/netfilter/ip_tables.c void ipt_unregister_table(struct net *net, struct xt_table *table, table 1804 net/ipv4/netfilter/ip_tables.c nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); table 1805 net/ipv4/netfilter/ip_tables.c __ipt_unregister_table(net, table); table 117 net/ipv4/netfilter/ipt_ECN.c .table = "mangle", table 92 net/ipv4/netfilter/ipt_REJECT.c .table = "filter", table 95 net/ipv4/netfilter/ipt_rpfilter.c if (strcmp(par->table, "mangle") != 0 && table 96 net/ipv4/netfilter/ipt_rpfilter.c strcmp(par->table, "raw") != 0) { table 98 net/ipv4/netfilter/ipt_rpfilter.c par->table); table 52 net/ipv4/netfilter/iptable_raw.c const struct xt_table *table = &packet_raw; table 56 net/ipv4/netfilter/iptable_raw.c table = &packet_raw_before_defrag; table 61 net/ipv4/netfilter/iptable_raw.c repl = ipt_alloc_initial_table(table); table 64 net/ipv4/netfilter/iptable_raw.c ret = ipt_register_table(net, table, repl, rawtable_ops, table 85 net/ipv4/netfilter/iptable_raw.c const struct xt_table *table = &packet_raw; table 88 net/ipv4/netfilter/iptable_raw.c table = &packet_raw_before_defrag; table 93 net/ipv4/netfilter/iptable_raw.c rawtable_ops = xt_hook_ops_alloc(table, iptable_raw_hook); table 71 net/ipv4/ping.c static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, table 74 net/ipv4/ping.c return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; table 2071 net/ipv4/route.c res->table = NULL; table 2218 net/ipv4/route.c res->table = NULL; table 2447 net/ipv4/route.c .table = NULL, table 2571 net/ipv4/route.c res->table = NULL; table 3176 net/ipv4/route.c table_id = res.table ? 
res.table->tb_id : 0; table 73 net/ipv4/sysctl_net_ipv4.c static int ipv4_local_port_range(struct ctl_table *table, int write, table 78 net/ipv4/sysctl_net_ipv4.c container_of(table->data, struct net, ipv4.ip_local_ports.range); table 84 net/ipv4/sysctl_net_ipv4.c .mode = table->mode, table 109 net/ipv4/sysctl_net_ipv4.c static int ipv4_privileged_ports(struct ctl_table *table, int write, table 112 net/ipv4/sysctl_net_ipv4.c struct net *net = container_of(table->data, struct net, table 120 net/ipv4/sysctl_net_ipv4.c .mode = table->mode, table 143 net/ipv4/sysctl_net_ipv4.c static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high) table 145 net/ipv4/sysctl_net_ipv4.c kgid_t *data = table->data; table 147 net/ipv4/sysctl_net_ipv4.c container_of(table->data, struct net, ipv4.ping_group_range.range); table 158 net/ipv4/sysctl_net_ipv4.c static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high) table 160 net/ipv4/sysctl_net_ipv4.c kgid_t *data = table->data; table 162 net/ipv4/sysctl_net_ipv4.c container_of(table->data, struct net, ipv4.ping_group_range.range); table 170 net/ipv4/sysctl_net_ipv4.c static int ipv4_ping_group_range(struct ctl_table *table, int write, table 181 net/ipv4/sysctl_net_ipv4.c .mode = table->mode, table 186 net/ipv4/sysctl_net_ipv4.c inet_get_ping_group_range_table(table, &low, &high); table 200 net/ipv4/sysctl_net_ipv4.c set_ping_group_range(table, low, high); table 206 net/ipv4/sysctl_net_ipv4.c static int ipv4_fwd_update_priority(struct ctl_table *table, int write, table 213 net/ipv4/sysctl_net_ipv4.c net = container_of(table->data, struct net, table 215 net/ipv4/sysctl_net_ipv4.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 298 net/ipv4/sysctl_net_ipv4.c static int proc_tcp_fastopen_key(struct ctl_table *table, int write, table 302 net/ipv4/sysctl_net_ipv4.c struct net *net = container_of(table->data, struct net, table 397 net/ipv4/sysctl_net_ipv4.c static int proc_tcp_early_demux(struct ctl_table *table, int write, table 402 net/ipv4/sysctl_net_ipv4.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 413 net/ipv4/sysctl_net_ipv4.c static int proc_udp_early_demux(struct ctl_table *table, int write, table 418 net/ipv4/sysctl_net_ipv4.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 429 net/ipv4/sysctl_net_ipv4.c static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table, table 434 net/ipv4/sysctl_net_ipv4.c struct net *net = container_of(table->data, struct net, table 438 net/ipv4/sysctl_net_ipv4.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 464 net/ipv4/sysctl_net_ipv4.c static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write, table 468 net/ipv4/sysctl_net_ipv4.c struct net *net = container_of(table->data, struct net, table 472 net/ipv4/sysctl_net_ipv4.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 1331 net/ipv4/sysctl_net_ipv4.c struct ctl_table *table; table 1333 net/ipv4/sysctl_net_ipv4.c table = ipv4_net_table; table 1337 net/ipv4/sysctl_net_ipv4.c table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL); table 1338 net/ipv4/sysctl_net_ipv4.c if (!table) table 1343 net/ipv4/sysctl_net_ipv4.c table[i].data += (void *)net - (void *)&init_net; table 1346 net/ipv4/sysctl_net_ipv4.c net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); table 1360 net/ipv4/sysctl_net_ipv4.c kfree(table); table 1367 net/ipv4/sysctl_net_ipv4.c struct ctl_table *table; table 1370 
net/ipv4/sysctl_net_ipv4.c table = net->ipv4.ipv4_hdr->ctl_table_arg; table 1372 net/ipv4/sysctl_net_ipv4.c kfree(table); table 2999 net/ipv4/udp.c void __init udp_table_init(struct udp_table *table, const char *name) table 3003 net/ipv4/udp.c table->hash = alloc_large_system_hash(name, table 3008 net/ipv4/udp.c &table->log, table 3009 net/ipv4/udp.c &table->mask, table 3013 net/ipv4/udp.c table->hash2 = table->hash + (table->mask + 1); table 3014 net/ipv4/udp.c for (i = 0; i <= table->mask; i++) { table 3015 net/ipv4/udp.c INIT_HLIST_HEAD(&table->hash[i].head); table 3016 net/ipv4/udp.c table->hash[i].count = 0; table 3017 net/ipv4/udp.c spin_lock_init(&table->hash[i].lock); table 3019 net/ipv4/udp.c for (i = 0; i <= table->mask; i++) { table 3020 net/ipv4/udp.c INIT_HLIST_HEAD(&table->hash2[i].head); table 3021 net/ipv4/udp.c table->hash2[i].count = 0; table 3022 net/ipv4/udp.c spin_lock_init(&table->hash2[i].lock); table 95 net/ipv4/udp_diag.c static void udp_dump(struct udp_table *table, struct sk_buff *skb, table 106 net/ipv4/udp_diag.c for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) { table 107 net/ipv4/udp_diag.c struct udp_hslot *hslot = &table->hash[slot]; table 173 net/ipv4/xfrm4_policy.c struct ctl_table *table; table 176 net/ipv4/xfrm4_policy.c table = xfrm4_policy_table; table 178 net/ipv4/xfrm4_policy.c table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL); table 179 net/ipv4/xfrm4_policy.c if (!table) table 182 net/ipv4/xfrm4_policy.c table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh; table 185 net/ipv4/xfrm4_policy.c hdr = register_net_sysctl(net, "net/ipv4", table); table 194 net/ipv4/xfrm4_policy.c kfree(table); table 201 net/ipv4/xfrm4_policy.c struct ctl_table *table; table 206 net/ipv4/xfrm4_policy.c table = net->ipv4.xfrm4_hdr->ctl_table_arg; table 209 net/ipv4/xfrm4_policy.c kfree(table); table 840 net/ipv6/addrconf.c static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) table 848 net/ipv6/addrconf.c net = (struct net *)table->extra2; table 879 net/ipv6/addrconf.c dev_forward_change((struct inet6_dev *)table->extra1); table 908 net/ipv6/addrconf.c static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf) table 916 net/ipv6/addrconf.c net = (struct net *)table->extra2; table 2410 net/ipv6/addrconf.c struct fib6_table *table; table 2413 net/ipv6/addrconf.c table = fib6_get_table(dev_net(dev), tb_id); table 2414 net/ipv6/addrconf.c if (!table) table 2418 net/ipv6/addrconf.c fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true); table 6128 net/ipv6/addrconf.c static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) table 6136 net/ipv6/addrconf.c net = (struct net *)table->extra2; table 6149 net/ipv6/addrconf.c dev_disable_change((struct inet6_dev *)table->extra1); table 6866 net/ipv6/addrconf.c struct ctl_table *table; table 6869 net/ipv6/addrconf.c table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL); table 6870 net/ipv6/addrconf.c if (!table) table 6873 net/ipv6/addrconf.c for (i = 0; table[i].data; i++) { table 6874 net/ipv6/addrconf.c table[i].data += (char *)p - (char *)&ipv6_devconf; table 6879 net/ipv6/addrconf.c if (!table[i].extra1 && !table[i].extra2) { table 6880 net/ipv6/addrconf.c table[i].extra1 = idev; /* embedded; no ref */ table 6881 net/ipv6/addrconf.c table[i].extra2 = net; table 6887 net/ipv6/addrconf.c p->sysctl_header = register_net_sysctl(net, path, table); table 6902 net/ipv6/addrconf.c kfree(table); table 6910 net/ipv6/addrconf.c struct 
ctl_table *table; table 6915 net/ipv6/addrconf.c table = p->sysctl_header->ctl_table_arg; table 6918 net/ipv6/addrconf.c kfree(table); table 151 net/ipv6/addrconf_core.c eafnosupport_fib6_table_lookup(struct net *net, struct fib6_table *table, table 44 net/ipv6/fib6_rules.c if (rule->table != RT6_TABLE_LOCAL && rule->table != RT6_TABLE_MAIN) table 159 net/ipv6/fib6_rules.c struct fib6_table *table; table 176 net/ipv6/fib6_rules.c table = fib6_get_table(net, tb_id); table 177 net/ipv6/fib6_rules.c if (!table) table 181 net/ipv6/fib6_rules.c err = fib6_table_lookup(net, table, *oif, flp6, res, flags); table 197 net/ipv6/fib6_rules.c struct fib6_table *table; table 222 net/ipv6/fib6_rules.c table = fib6_get_table(net, tb_id); table 223 net/ipv6/fib6_rules.c if (!table) { table 228 net/ipv6/fib6_rules.c rt = lookup(net, table, flp6, arg->lookup_data, flags); table 349 net/ipv6/fib6_rules.c if (rule->table == RT6_TABLE_UNSPEC) { table 354 net/ipv6/fib6_rules.c if (fib6_new_table(net, rule->table) == NULL) { table 1161 net/ipv6/icmp.c struct ctl_table *table; table 1163 net/ipv6/icmp.c table = kmemdup(ipv6_icmp_table_template, table 1167 net/ipv6/icmp.c if (table) { table 1168 net/ipv6/icmp.c table[0].data = &net->ipv6.sysctl.icmpv6_time; table 1169 net/ipv6/icmp.c table[1].data = &net->ipv6.sysctl.icmpv6_echo_ignore_all; table 1170 net/ipv6/icmp.c table[2].data = &net->ipv6.sysctl.icmpv6_echo_ignore_multicast; table 1171 net/ipv6/icmp.c table[3].data = &net->ipv6.sysctl.icmpv6_echo_ignore_anycast; table 1172 net/ipv6/icmp.c table[4].data = &net->ipv6.sysctl.icmpv6_ratemask_ptr; table 1174 net/ipv6/icmp.c return table; table 56 net/ipv6/ip6_fib.c struct fib6_table *table, table 59 net/ipv6/ip6_fib.c struct fib6_table *table, table 211 net/ipv6/ip6_fib.c static void fib6_free_table(struct fib6_table *table) table 213 net/ipv6/ip6_fib.c inetpeer_invalidate_tree(&table->tb6_peers); table 214 net/ipv6/ip6_fib.c kfree(table); table 239 net/ipv6/ip6_fib.c struct fib6_table *table; table 241 net/ipv6/ip6_fib.c table = kzalloc(sizeof(*table), GFP_ATOMIC); table 242 net/ipv6/ip6_fib.c if (table) { table 243 net/ipv6/ip6_fib.c table->tb6_id = id; table 244 net/ipv6/ip6_fib.c rcu_assign_pointer(table->tb6_root.leaf, table 246 net/ipv6/ip6_fib.c table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; table 247 net/ipv6/ip6_fib.c inet_peer_base_init(&table->tb6_peers); table 250 net/ipv6/ip6_fib.c return table; table 518 net/ipv6/ip6_fib.c static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb, table 526 net/ipv6/ip6_fib.c w->root = &table->tb6_root; table 533 net/ipv6/ip6_fib.c spin_lock_bh(&table->tb6_lock); table 535 net/ipv6/ip6_fib.c spin_unlock_bh(&table->tb6_lock); table 551 net/ipv6/ip6_fib.c spin_lock_bh(&table->tb6_lock); table 553 net/ipv6/ip6_fib.c spin_unlock_bh(&table->tb6_lock); table 686 net/ipv6/ip6_fib.c struct fib6_table *table, table 707 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 755 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)) : table 757 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 808 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 848 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock))); table 902 net/ipv6/ip6_fib.c const struct fib6_table *table) table 935 net/ipv6/ip6_fib.c const struct fib6_table *table; table 942 net/ipv6/ip6_fib.c __fib6_drop_pcpu_from(nh, arg->from, arg->table); table 947 net/ipv6/ip6_fib.c const struct fib6_table *table) table 958 net/ipv6/ip6_fib.c .table = table table 967 
net/ipv6/ip6_fib.c __fib6_drop_pcpu_from(fib6_nh, f6i, table); table 974 net/ipv6/ip6_fib.c struct fib6_table *table = rt->fib6_table; table 976 net/ipv6/ip6_fib.c fib6_drop_pcpu_from(rt, table); table 990 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 993 net/ipv6/ip6_fib.c new_leaf = fib6_find_prefix(net, table, fn); table 1000 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1306 net/ipv6/ip6_fib.c struct fib6_table *table = rt->fib6_table; table 1322 net/ipv6/ip6_fib.c fn = fib6_add_1(info->nl_net, table, root, table 1363 net/ipv6/ip6_fib.c sn = fib6_add_1(info->nl_net, table, sfn, table 1382 net/ipv6/ip6_fib.c sn = fib6_add_1(info->nl_net, table, FIB6_SUBTREE(fn), table 1425 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1432 net/ipv6/ip6_fib.c pn_leaf = fib6_find_prefix(info->nl_net, table, table 1462 net/ipv6/ip6_fib.c fib6_repair_tree(info->nl_net, table, fn); table 1674 net/ipv6/ip6_fib.c struct fib6_table *table, table 1684 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1686 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1689 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1692 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1706 net/ipv6/ip6_fib.c struct fib6_table *table, table 1723 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1725 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1727 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1729 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1731 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1733 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1735 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1758 net/ipv6/ip6_fib.c new_fn_leaf = fib6_find_prefix(net, table, fn); table 1826 net/ipv6/ip6_fib.c static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, table 1831 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1867 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1883 net/ipv6/ip6_fib.c fn = fib6_repair_tree(net, table, fn); table 1901 net/ipv6/ip6_fib.c struct fib6_table *table = rt->fib6_table; table 1917 net/ipv6/ip6_fib.c lockdep_is_held(&table->tb6_lock)); table 1919 net/ipv6/ip6_fib.c fib6_del_route(table, fn, rtp, info); table 2138 net/ipv6/ip6_fib.c struct fib6_table *table; table 2145 net/ipv6/ip6_fib.c hlist_for_each_entry_rcu(table, head, tb6_hlist) { table 2146 net/ipv6/ip6_fib.c spin_lock_bh(&table->tb6_lock); table 2147 net/ipv6/ip6_fib.c fib6_clean_tree(net, &table->tb6_root, table 2149 net/ipv6/ip6_fib.c spin_unlock_bh(&table->tb6_lock); table 169 net/ipv6/ip6mr.c arg->table = fib_rule_get_table(rule, arg); table 171 net/ipv6/ip6mr.c mrt = ip6mr_get_table(rule->fr_net, arg->table); table 281 net/ipv6/ip6mr.c rule->table == RT6_TABLE_DFLT && !rule->l3mdev; table 252 net/ipv6/netfilter/ip6_tables.c struct xt_table *table) table 279 net/ipv6/netfilter/ip6_tables.c WARN_ON(!(table->valid_hooks & (1 << hook))); table 283 net/ipv6/netfilter/ip6_tables.c private = READ_ONCE(table->private); /* Address dependency. 
*/ table 331 net/ipv6/netfilter/ip6_tables.c state->out, table->name, private, e); table 521 net/ipv6/netfilter/ip6_tables.c .table = name, table 552 net/ipv6/netfilter/ip6_tables.c mtpar.table = name; table 806 net/ipv6/netfilter/ip6_tables.c static struct xt_counters *alloc_counters(const struct xt_table *table) table 810 net/ipv6/netfilter/ip6_tables.c const struct xt_table_info *private = table->private; table 828 net/ipv6/netfilter/ip6_tables.c const struct xt_table *table, table 834 net/ipv6/netfilter/ip6_tables.c const struct xt_table_info *private = table->private; table 838 net/ipv6/netfilter/ip6_tables.c counters = alloc_counters(table); table 1578 net/ipv6/netfilter/ip6_tables.c compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, table 1582 net/ipv6/netfilter/ip6_tables.c const struct xt_table_info *private = table->private; table 1589 net/ipv6/netfilter/ip6_tables.c counters = alloc_counters(table); table 1746 net/ipv6/netfilter/ip6_tables.c static void __ip6t_unregister_table(struct net *net, struct xt_table *table) table 1750 net/ipv6/netfilter/ip6_tables.c struct module *table_owner = table->me; table 1753 net/ipv6/netfilter/ip6_tables.c private = xt_unregister_table(table); table 1764 net/ipv6/netfilter/ip6_tables.c int ip6t_register_table(struct net *net, const struct xt_table *table, table 1786 net/ipv6/netfilter/ip6_tables.c new_table = xt_register_table(net, table, &bootstrap, newinfo); table 1797 net/ipv6/netfilter/ip6_tables.c ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); table 1810 net/ipv6/netfilter/ip6_tables.c void ip6t_unregister_table(struct net *net, struct xt_table *table, table 1814 net/ipv6/netfilter/ip6_tables.c nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); table 1815 net/ipv6/netfilter/ip6_tables.c __ip6t_unregister_table(net, table); table 109 net/ipv6/netfilter/ip6t_NPT.c .table = "mangle", table 121 net/ipv6/netfilter/ip6t_NPT.c .table = "mangle", table 103 net/ipv6/netfilter/ip6t_REJECT.c .table = "filter", table 119 net/ipv6/netfilter/ip6t_rpfilter.c if (strcmp(par->table, "mangle") != 0 && table 120 net/ipv6/netfilter/ip6t_rpfilter.c strcmp(par->table, "raw") != 0) { table 122 net/ipv6/netfilter/ip6t_rpfilter.c par->table); table 51 net/ipv6/netfilter/ip6table_raw.c const struct xt_table *table = &packet_raw; table 55 net/ipv6/netfilter/ip6table_raw.c table = &packet_raw_before_defrag; table 60 net/ipv6/netfilter/ip6table_raw.c repl = ip6t_alloc_initial_table(table); table 63 net/ipv6/netfilter/ip6table_raw.c ret = ip6t_register_table(net, table, repl, rawtable_ops, table 84 net/ipv6/netfilter/ip6table_raw.c const struct xt_table *table = &packet_raw; table 87 net/ipv6/netfilter/ip6table_raw.c table = &packet_raw_before_defrag; table 93 net/ipv6/netfilter/ip6table_raw.c rawtable_ops = xt_hook_ops_alloc(table, ip6table_raw_hook); table 78 net/ipv6/netfilter/nf_conntrack_reasm.c struct ctl_table *table; table 81 net/ipv6/netfilter/nf_conntrack_reasm.c table = nf_ct_frag6_sysctl_table; table 83 net/ipv6/netfilter/nf_conntrack_reasm.c table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table), table 85 net/ipv6/netfilter/nf_conntrack_reasm.c if (table == NULL) table 89 net/ipv6/netfilter/nf_conntrack_reasm.c table[0].data = &net->nf_frag.fqdir->timeout; table 90 net/ipv6/netfilter/nf_conntrack_reasm.c table[1].data = &net->nf_frag.fqdir->low_thresh; table 91 net/ipv6/netfilter/nf_conntrack_reasm.c table[1].extra2 = &net->nf_frag.fqdir->high_thresh; table 92 
net/ipv6/netfilter/nf_conntrack_reasm.c table[2].data = &net->nf_frag.fqdir->high_thresh; table 93 net/ipv6/netfilter/nf_conntrack_reasm.c table[2].extra1 = &net->nf_frag.fqdir->low_thresh; table 94 net/ipv6/netfilter/nf_conntrack_reasm.c table[2].extra2 = &init_net.nf_frag.fqdir->high_thresh; table 96 net/ipv6/netfilter/nf_conntrack_reasm.c hdr = register_net_sysctl(net, "net/netfilter", table); table 105 net/ipv6/netfilter/nf_conntrack_reasm.c kfree(table); table 112 net/ipv6/netfilter/nf_conntrack_reasm.c struct ctl_table *table; table 114 net/ipv6/netfilter/nf_conntrack_reasm.c table = net->nf_frag_frags_hdr->ctl_table_arg; table 117 net/ipv6/netfilter/nf_conntrack_reasm.c kfree(table); table 432 net/ipv6/reassembly.c struct ctl_table *table; table 435 net/ipv6/reassembly.c table = ip6_frags_ns_ctl_table; table 437 net/ipv6/reassembly.c table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL); table 438 net/ipv6/reassembly.c if (!table) table 442 net/ipv6/reassembly.c table[0].data = &net->ipv6.fqdir->high_thresh; table 443 net/ipv6/reassembly.c table[0].extra1 = &net->ipv6.fqdir->low_thresh; table 444 net/ipv6/reassembly.c table[1].data = &net->ipv6.fqdir->low_thresh; table 445 net/ipv6/reassembly.c table[1].extra2 = &net->ipv6.fqdir->high_thresh; table 446 net/ipv6/reassembly.c table[2].data = &net->ipv6.fqdir->timeout; table 448 net/ipv6/reassembly.c hdr = register_net_sysctl(net, "net/ipv6", table); table 457 net/ipv6/reassembly.c kfree(table); table 464 net/ipv6/reassembly.c struct ctl_table *table; table 466 net/ipv6/reassembly.c table = net->ipv6.sysctl.frags_hdr->ctl_table_arg; table 469 net/ipv6/reassembly.c kfree(table); table 1213 net/ipv6/route.c struct fib6_table *table, table 1226 net/ipv6/route.c fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); table 1261 net/ipv6/route.c trace_fib6_table_lookup(net, &res, table, fl6); table 1311 net/ipv6/route.c struct fib6_table *table; table 1313 net/ipv6/route.c table = rt->fib6_table; table 1314 net/ipv6/route.c spin_lock_bh(&table->tb6_lock); table 1315 net/ipv6/route.c err = fib6_add(&table->tb6_root, rt, info, extack); table 1316 net/ipv6/route.c spin_unlock_bh(&table->tb6_lock); table 2180 net/ipv6/route.c int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif, table 2185 net/ipv6/route.c fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); table 2205 net/ipv6/route.c trace_fib6_table_lookup(net, res, table, fl6); table 2210 net/ipv6/route.c struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, table 2228 net/ipv6/route.c fib6_table_lookup(net, table, oif, fl6, &res, strict); table 2281 net/ipv6/route.c struct fib6_table *table, table 2286 net/ipv6/route.c return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags); table 2475 net/ipv6/route.c struct fib6_table *table, table 2480 net/ipv6/route.c return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags); table 2922 net/ipv6/route.c struct fib6_table *table, table 2956 net/ipv6/route.c fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); table 3006 net/ipv6/route.c trace_fib6_table_lookup(net, &res, table, fl6); table 3230 net/ipv6/route.c struct fib6_table *table; table 3233 net/ipv6/route.c table = fib6_get_table(net, tbid); table 3234 net/ipv6/route.c if (!table) table 3242 net/ipv6/route.c err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags); table 3581 net/ipv6/route.c struct fib6_table *table; table 3632 net/ipv6/route.c table = fib6_get_table(net, 
cfg->fc_table); table 3633 net/ipv6/route.c if (!table) { table 3635 net/ipv6/route.c table = fib6_new_table(net, cfg->fc_table); table 3638 net/ipv6/route.c table = fib6_new_table(net, cfg->fc_table); table 3641 net/ipv6/route.c if (!table) table 3671 net/ipv6/route.c rt->fib6_table = table; table 3750 net/ipv6/route.c struct fib6_table *table; table 3758 net/ipv6/route.c table = rt->fib6_table; table 3759 net/ipv6/route.c spin_lock_bh(&table->tb6_lock); table 3761 net/ipv6/route.c spin_unlock_bh(&table->tb6_lock); table 3780 net/ipv6/route.c struct fib6_table *table; table 3785 net/ipv6/route.c table = rt->fib6_table; table 3786 net/ipv6/route.c spin_lock_bh(&table->tb6_lock); table 3822 net/ipv6/route.c spin_unlock_bh(&table->tb6_lock); table 3892 net/ipv6/route.c struct fib6_table *table; table 3897 net/ipv6/route.c table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table); table 3898 net/ipv6/route.c if (!table) { table 3905 net/ipv6/route.c fn = fib6_locate(&table->tb6_root, table 4128 net/ipv6/route.c struct fib6_table *table; table 4130 net/ipv6/route.c table = fib6_get_table(net, tb_id); table 4131 net/ipv6/route.c if (!table) table 4135 net/ipv6/route.c fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true); table 4198 net/ipv6/route.c struct fib6_table *table; table 4200 net/ipv6/route.c table = fib6_get_table(net, tb_id); table 4201 net/ipv6/route.c if (!table) table 4205 net/ipv6/route.c for_each_fib6_node_rt_rcu(&table->tb6_root) { table 4245 net/ipv6/route.c struct fib6_table *table; table 4247 net/ipv6/route.c table = fib6_get_table(dev_net(dev), cfg.fc_table); table 4248 net/ipv6/route.c if (table) table 4249 net/ipv6/route.c table->flags |= RT6_TABLE_HAS_DFLT_ROUTER; table 4256 net/ipv6/route.c struct fib6_table *table) table 4262 net/ipv6/route.c for_each_fib6_node_rt_rcu(&table->tb6_root) { table 4276 net/ipv6/route.c table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER; table 4281 net/ipv6/route.c struct fib6_table *table; table 4289 net/ipv6/route.c hlist_for_each_entry_rcu(table, head, tb6_hlist) { table 4290 net/ipv6/route.c if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER) table 4291 net/ipv6/route.c __rt6_purge_dflt_routers(net, table); table 5404 net/ipv6/route.c u32 *pmetrics, table, rt6_flags; table 5430 net/ipv6/route.c table = rt->fib6_table->tb6_id; table 5432 net/ipv6/route.c table = RT6_TABLE_UNSPEC; table 5433 net/ipv6/route.c rtm->rtm_table = table < 256 ? 
table : RT_TABLE_COMPAT; table 5434 net/ipv6/route.c if (nla_put_u32(skb, RTA_TABLE, table)) table 6165 net/ipv6/route.c struct ctl_table *table; table 6167 net/ipv6/route.c table = kmemdup(ipv6_route_table_template, table 6171 net/ipv6/route.c if (table) { table 6172 net/ipv6/route.c table[0].data = &net->ipv6.sysctl.flush_delay; table 6173 net/ipv6/route.c table[0].extra1 = net; table 6174 net/ipv6/route.c table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; table 6175 net/ipv6/route.c table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; table 6176 net/ipv6/route.c table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; table 6177 net/ipv6/route.c table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout; table 6178 net/ipv6/route.c table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval; table 6179 net/ipv6/route.c table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity; table 6180 net/ipv6/route.c table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; table 6181 net/ipv6/route.c table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; table 6182 net/ipv6/route.c table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; table 6183 net/ipv6/route.c table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down; table 6187 net/ipv6/route.c table[0].procname = NULL; table 6190 net/ipv6/route.c return table; table 51 net/ipv6/seg6_local.c int table; table 177 net/ipv6/seg6_local.c struct fib6_table *table; table 179 net/ipv6/seg6_local.c table = fib6_get_table(net, tbl_id); table 180 net/ipv6/seg6_local.c if (!table) table 183 net/ipv6/seg6_local.c rt = ip6_pol_route(net, table, 0, &fl6, skb, flags); table 254 net/ipv6/seg6_local.c seg6_lookup_nexthop(skb, NULL, slwt->table); table 401 net/ipv6/seg6_local.c seg6_lookup_nexthop(skb, NULL, slwt->table); table 701 net/ipv6/seg6_local.c slwt->table = nla_get_u32(attrs[SEG6_LOCAL_TABLE]); table 708 net/ipv6/seg6_local.c if (nla_put_u32(skb, SEG6_LOCAL_TABLE, slwt->table)) table 716 net/ipv6/seg6_local.c if (a->table != b->table) table 28 net/ipv6/sysctl_net_ipv6.c static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write, table 35 net/ipv6/sysctl_net_ipv6.c net = container_of(table->data, struct net, table 37 net/ipv6/sysctl_net_ipv6.c ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); table 200 net/ipv6/xfrm6_policy.c struct ctl_table *table; table 203 net/ipv6/xfrm6_policy.c table = xfrm6_policy_table; table 205 net/ipv6/xfrm6_policy.c table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL); table 206 net/ipv6/xfrm6_policy.c if (!table) table 209 net/ipv6/xfrm6_policy.c table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh; table 212 net/ipv6/xfrm6_policy.c hdr = register_net_sysctl(net, "net/ipv6", table); table 221 net/ipv6/xfrm6_policy.c kfree(table); table 228 net/ipv6/xfrm6_policy.c struct ctl_table *table; table 233 net/ipv6/xfrm6_policy.c table = net->ipv6.sysctl.xfrm6_hdr->ctl_table_arg; table 236 net/ipv6/xfrm6_policy.c kfree(table); table 38 net/key/af_key.c struct hlist_head table; table 121 net/key/af_key.c sk_add_node_rcu(sk, &net_pfkey->table); table 234 net/key/af_key.c sk_for_each_rcu(sk, &net_pfkey->table) { table 3121 net/key/af_key.c sk_for_each_rcu(sk, &net_pfkey->table) { table 3781 net/key/af_key.c return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos); table 3789 net/key/af_key.c return seq_hlist_next_rcu(v, &net_pfkey->table, ppos); table 3848 net/key/af_key.c INIT_HLIST_HEAD(&net_pfkey->table); table 3861 net/key/af_key.c WARN_ON(!hlist_empty(&net_pfkey->table)); table 164 net/l3mdev/l3mdev.c arg->table 
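
The nf_conntrack_reasm.c, reassembly.c, route.c and xfrm6_policy.c entries above all trace the same per-namespace sysctl idiom: duplicate a static ctl_table template with kmemdup(), repoint each entry's .data (and the .extra1/.extra2 bounds) at fields of the current struct net, register the copy with register_net_sysctl(), and free it on failure or on namespace teardown via the header's ctl_table_arg back-pointer. A condensed sketch of that idiom, assuming a hypothetical two-entry template my_table_template and hypothetical per-net fields net->mynet.high_thresh / low_thresh:

	static struct ctl_table_header *my_register_sysctl(struct net *net)
	{
		struct ctl_table *table;
		struct ctl_table_header *hdr;

		/* private copy so each netns gets its own .data pointers */
		table = kmemdup(my_table_template, sizeof(my_table_template), GFP_KERNEL);
		if (!table)
			return NULL;

		table[0].data   = &net->mynet.high_thresh;	/* hypothetical per-net fields */
		table[0].extra1 = &net->mynet.low_thresh;
		table[1].data   = &net->mynet.low_thresh;
		table[1].extra2 = &net->mynet.high_thresh;

		hdr = register_net_sysctl(net, "net/ipv6", table);
		if (!hdr)
			kfree(table);	/* registration failed: drop the copy */
		return hdr;
	}

	static void my_unregister_sysctl(struct ctl_table_header *hdr)
	{
		/* ctl_table_arg remembers the kmemdup'ed copy so it can be freed */
		struct ctl_table *table = hdr->ctl_table_arg;

		unregister_net_sysctl_table(hdr);
		kfree(table);
	}
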
= dev->l3mdev_ops->l3mdev_fib_table(dev); table 172 net/l3mdev/l3mdev.c arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev); table 267 net/mac802154/cfg.c struct ieee802154_llsec_table **table) table 272 net/mac802154/cfg.c *table = &sdata->sec.table; table 33 net/mac802154/llsec.c INIT_LIST_HEAD(&sec->table.security_levels); table 34 net/mac802154/llsec.c INIT_LIST_HEAD(&sec->table.devices); table 35 net/mac802154/llsec.c INIT_LIST_HEAD(&sec->table.keys); table 47 net/mac802154/llsec.c list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) { table 55 net/mac802154/llsec.c list_for_each_entry_safe(dev, dn, &sec->table.devices, list) { table 63 net/mac802154/llsec.c list_for_each_entry_safe(key, kn, &sec->table.keys, list) { table 223 net/mac802154/llsec.c list_for_each_entry(pos, &sec->table.keys, list) { table 259 net/mac802154/llsec.c list_add_rcu(&new->list, &sec->table.keys); table 273 net/mac802154/llsec.c list_for_each_entry(pos, &sec->table.keys, list) { table 378 net/mac802154/llsec.c list_add_tail_rcu(&entry->dev.list, &sec->table.devices); table 472 net/mac802154/llsec.c list_for_each_entry(pos, &sec->table.security_levels, list) { table 501 net/mac802154/llsec.c list_add_tail_rcu(&entry->level.list, &sec->table.security_levels); table 562 net/mac802154/llsec.c list_for_each_entry_rcu(key_entry, &sec->table.keys, list) { table 814 net/mac802154/llsec.c list_for_each_entry_rcu(level, &sec->table.security_levels, list) { table 55 net/mac802154/llsec.h struct ieee802154_llsec_table table; table 209 net/mac802154/mib.c *t = &sdata->sec.table; table 1403 net/mpls/af_mpls.c struct ctl_table *table; table 1406 net/mpls/af_mpls.c table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL); table 1407 net/mpls/af_mpls.c if (!table) table 1414 net/mpls/af_mpls.c table[i].data = (char *)mdev + (uintptr_t)table[i].data; table 1415 net/mpls/af_mpls.c table[i].extra1 = mdev; table 1416 net/mpls/af_mpls.c table[i].extra2 = net; table 1421 net/mpls/af_mpls.c mdev->sysctl = register_net_sysctl(net, path, table); table 1429 net/mpls/af_mpls.c kfree(table); table 1438 net/mpls/af_mpls.c struct ctl_table *table; table 1440 net/mpls/af_mpls.c table = mdev->sysctl->ctl_table_arg; table 1442 net/mpls/af_mpls.c kfree(table); table 1656 net/mpls/af_mpls.c u8 table, const void *addr, int alen) table 1669 net/mpls/af_mpls.c if (table <= NEIGH_NR_TABLES) table 1670 net/mpls/af_mpls.c family = table_to_family[table]; table 2596 net/mpls/af_mpls.c static int mpls_platform_labels(struct ctl_table *table, int write, table 2599 net/mpls/af_mpls.c struct net *net = table->data; table 2603 net/mpls/af_mpls.c .procname = table->procname, table 2606 net/mpls/af_mpls.c .mode = table->mode, table 2653 net/mpls/af_mpls.c struct ctl_table *table; table 2661 net/mpls/af_mpls.c table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL); table 2662 net/mpls/af_mpls.c if (table == NULL) table 2669 net/mpls/af_mpls.c table[i].data = (char *)net + (uintptr_t)table[i].data; table 2671 net/mpls/af_mpls.c net->mpls.ctl = register_net_sysctl(net, "net/mpls", table); table 2673 net/mpls/af_mpls.c kfree(table); table 2684 net/mpls/af_mpls.c struct ctl_table *table; table 2687 net/mpls/af_mpls.c table = net->mpls.ctl->ctl_table_arg; table 2689 net/mpls/af_mpls.c kfree(table); table 317 net/netfilter/ipset/ip_set_hash_gen.h struct htable __rcu *table; /* the hash table */ table 430 net/netfilter/ipset/ip_set_hash_gen.h t = ipset_dereference_nfnl(h->table); table 484 net/netfilter/ipset/ip_set_hash_gen.h 
mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true); table 600 net/netfilter/ipset/ip_set_hash_gen.h t = ipset_dereference_set(h->table, set); table 665 net/netfilter/ipset/ip_set_hash_gen.h orig = ipset_dereference_bh_nfnl(h->table); table 697 net/netfilter/ipset/ip_set_hash_gen.h orig = ipset_dereference_bh_nfnl(h->table); table 779 net/netfilter/ipset/ip_set_hash_gen.h rcu_assign_pointer(h->table, t); table 832 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); table 869 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); table 1062 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); table 1175 net/netfilter/ipset/ip_set_hash_gen.h struct htable *t = rcu_dereference_bh(h->table); table 1238 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); table 1286 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); table 1330 net/netfilter/ipset/ip_set_hash_gen.h t = ipset_dereference_bh_nfnl(h->table); table 1554 net/netfilter/ipset/ip_set_hash_gen.h RCU_INIT_POINTER(h->table, t); table 127 net/netfilter/ipvs/ip_vs_core.c void ip_vs_init_hash_table(struct list_head *table, int rows) table 130 net/netfilter/ipvs/ip_vs_core.c INIT_LIST_HEAD(&table[rows]); table 1732 net/netfilter/ipvs/ip_vs_ctl.c proc_do_defense_mode(struct ctl_table *table, int write, table 1735 net/netfilter/ipvs/ip_vs_ctl.c struct netns_ipvs *ipvs = table->extra2; table 1736 net/netfilter/ipvs/ip_vs_ctl.c int *valp = table->data; table 1743 net/netfilter/ipvs/ip_vs_ctl.c .mode = table->mode, table 1759 net/netfilter/ipvs/ip_vs_ctl.c proc_do_sync_threshold(struct ctl_table *table, int write, table 1762 net/netfilter/ipvs/ip_vs_ctl.c int *valp = table->data; table 1767 net/netfilter/ipvs/ip_vs_ctl.c .maxlen = table->maxlen, table 1768 net/netfilter/ipvs/ip_vs_ctl.c .mode = table->mode, table 1784 net/netfilter/ipvs/ip_vs_ctl.c proc_do_sync_ports(struct ctl_table *table, int write, table 1787 net/netfilter/ipvs/ip_vs_ctl.c int *valp = table->data; table 1794 net/netfilter/ipvs/ip_vs_ctl.c .mode = table->mode, table 1995 net/netfilter/ipvs/ip_vs_ctl.c struct hlist_head *table; table 2031 net/netfilter/ipvs/ip_vs_ctl.c iter->table = ip_vs_svc_table; table 2043 net/netfilter/ipvs/ip_vs_ctl.c iter->table = ip_vs_svc_fwm_table; table 2074 net/netfilter/ipvs/ip_vs_ctl.c if (iter->table == ip_vs_svc_table) { table 2088 net/netfilter/ipvs/ip_vs_ctl.c iter->table = ip_vs_svc_fwm_table; table 2137 net/netfilter/ipvs/ip_vs_ctl.c if (iter->table == ip_vs_svc_table) { table 163 net/netfilter/ipvs/ip_vs_mh.c unsigned long *table; table 177 net/netfilter/ipvs/ip_vs_mh.c table = kcalloc(BITS_TO_LONGS(IP_VS_MH_TAB_SIZE), table 179 net/netfilter/ipvs/ip_vs_mh.c if (!table) table 199 net/netfilter/ipvs/ip_vs_mh.c while (test_bit(c, table)) { table 207 net/netfilter/ipvs/ip_vs_mh.c __set_bit(c, table); table 230 net/netfilter/ipvs/ip_vs_mh.c kfree(table); table 190 net/netfilter/ipvs/ip_vs_proto.c ip_vs_create_timeout_table(int *table, int size) table 192 net/netfilter/ipvs/ip_vs_proto.c return kmemdup(table, size, GFP_KERNEL); table 519 net/netfilter/nf_conntrack_standalone.c nf_conntrack_hash_sysctl(struct ctl_table *table, int write, table 524 net/netfilter/nf_conntrack_standalone.c ret = proc_dointvec(table, write, buffer, lenp, ppos); table 938 net/netfilter/nf_conntrack_standalone.c struct ctl_table *table) table 943 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ ## XNAME].data = \ table 958 
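
The ip_set_hash_gen.h entries all revolve around one RCU-managed pointer, h->table: readers fetch it with rcu_dereference_bh() inside a BH-disabled RCU read section, while resizing publishes a new bucket array with rcu_assign_pointer() and only then retires the old one. A rough sketch of that reader/updater split, using hypothetical my_htable/my_set types and ignoring the real set-level locking and grace-period handling:

	struct my_htable {
		unsigned int		buckets;
		struct hlist_head	bucket[];	/* flexible array of chains */
	};

	struct my_set {
		struct my_htable __rcu	*table;		/* RCU-swapped on resize */
	};

	static bool my_set_test(struct my_set *set, unsigned int hash)
	{
		struct my_htable *t;
		bool found;

		rcu_read_lock_bh();
		t = rcu_dereference_bh(set->table);	/* lockless reader */
		found = !hlist_empty(&t->bucket[hash % t->buckets]);
		rcu_read_unlock_bh();
		return found;
	}

	static void my_set_replace(struct my_set *set, struct my_htable *new)
	{
		/* updater: publish the new table; free the old one only after
		 * a grace period (omitted here) */
		rcu_assign_pointer(set->table, new);
	}
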
net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TCP_ ## XNAME].data = (rval) table 967 net/netfilter/nf_conntrack_standalone.c struct ctl_table *table) table 973 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ ## XNAME].data = \ table 990 net/netfilter/nf_conntrack_standalone.c struct ctl_table *table) table 996 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_ ## XNAME].data = \ table 1008 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_DCCP_LOOSE].data = &dn->dccp_loose; table 1013 net/netfilter/nf_conntrack_standalone.c struct ctl_table *table) table 1018 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE].data = &gn->timeouts[GRE_CT_UNREPLIED]; table 1019 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM].data = &gn->timeouts[GRE_CT_REPLIED]; table 1026 net/netfilter/nf_conntrack_standalone.c struct ctl_table *table; table 1030 net/netfilter/nf_conntrack_standalone.c table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table), table 1032 net/netfilter/nf_conntrack_standalone.c if (!table) table 1035 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; table 1036 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; table 1037 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; table 1038 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct; table 1039 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper; table 1041 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; table 1044 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp; table 1046 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; table 1047 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout; table 1048 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout; table 1049 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED]; table 1050 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED]; table 1052 net/netfilter/nf_conntrack_standalone.c nf_conntrack_standalone_init_tcp_sysctl(net, table); table 1053 net/netfilter/nf_conntrack_standalone.c nf_conntrack_standalone_init_sctp_sysctl(net, table); table 1054 net/netfilter/nf_conntrack_standalone.c nf_conntrack_standalone_init_dccp_sysctl(net, table); table 1055 net/netfilter/nf_conntrack_standalone.c nf_conntrack_standalone_init_gre_sysctl(net, table); table 1059 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_MAX].procname = NULL; table 1060 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_ACCT].procname = NULL; table 1061 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_HELPER].procname = NULL; table 1063 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_TIMESTAMP].procname = NULL; table 1066 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_EVENTS].procname = NULL; table 1071 net/netfilter/nf_conntrack_standalone.c 
table[NF_SYSCTL_CT_BUCKETS].mode = 0444; table 1073 net/netfilter/nf_conntrack_standalone.c net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table); table 1080 net/netfilter/nf_conntrack_standalone.c kfree(table); table 1086 net/netfilter/nf_conntrack_standalone.c struct ctl_table *table; table 1088 net/netfilter/nf_conntrack_standalone.c table = net->ct.sysctl_header->ctl_table_arg; table 1090 net/netfilter/nf_conntrack_standalone.c kfree(table); table 416 net/netfilter/nf_log.c static int nf_log_proc_dostring(struct ctl_table *table, int write, table 422 net/netfilter/nf_log.c int tindex = (unsigned long)table->extra1; table 423 net/netfilter/nf_log.c struct net *net = table->extra2; table 426 net/netfilter/nf_log.c struct ctl_table tmp = *table; table 450 net/netfilter/nf_log.c struct ctl_table tmp = *table; table 469 net/netfilter/nf_log.c struct ctl_table *table; table 471 net/netfilter/nf_log.c table = nf_log_sysctl_table; table 473 net/netfilter/nf_log.c table = kmemdup(nf_log_sysctl_table, table 476 net/netfilter/nf_log.c if (!table) table 498 net/netfilter/nf_log.c table[i].extra2 = net; table 502 net/netfilter/nf_log.c table); table 510 net/netfilter/nf_log.c kfree(table); table 520 net/netfilter/nf_log.c struct ctl_table *table; table 522 net/netfilter/nf_log.c table = net->nf.nf_log_dir_header->ctl_table_arg; table 525 net/netfilter/nf_log.c kfree(table); table 91 net/netfilter/nf_tables_api.c struct nft_table *table, table 98 net/netfilter/nf_tables_api.c ctx->table = table; table 157 net/netfilter/nf_tables_api.c const struct nft_table *table, table 163 net/netfilter/nf_tables_api.c if (table->flags & NFT_TABLE_F_DORMANT || table 177 net/netfilter/nf_tables_api.c const struct nft_table *table, table 183 net/netfilter/nf_tables_api.c if (table->flags & NFT_TABLE_F_DORMANT || table 204 net/netfilter/nf_tables_api.c nft_activate_next(ctx->net, ctx->table); table 218 net/netfilter/nf_tables_api.c nft_deactivate_next(ctx->net, ctx->table); table 245 net/netfilter/nf_tables_api.c ctx->table->use--; table 375 net/netfilter/nf_tables_api.c ctx->table->use--; table 407 net/netfilter/nf_tables_api.c ctx->table->use--; table 441 net/netfilter/nf_tables_api.c ctx->table->use--; table 454 net/netfilter/nf_tables_api.c struct nft_table *table; table 459 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(table, &net->nft.tables, list) { table 460 net/netfilter/nf_tables_api.c if (!nla_strcmp(nla, table->name) && table 461 net/netfilter/nf_tables_api.c table->family == family && table 462 net/netfilter/nf_tables_api.c nft_active_genmask(table, genmask)) table 463 net/netfilter/nf_tables_api.c return table; table 473 net/netfilter/nf_tables_api.c struct nft_table *table; table 475 net/netfilter/nf_tables_api.c list_for_each_entry(table, &net->nft.tables, list) { table 476 net/netfilter/nf_tables_api.c if (be64_to_cpu(nla_get_be64(nla)) == table->handle && table 477 net/netfilter/nf_tables_api.c nft_active_genmask(table, genmask)) table 478 net/netfilter/nf_tables_api.c return table; table 484 net/netfilter/nf_tables_api.c static inline u64 nf_tables_alloc_handle(struct nft_table *table) table 486 net/netfilter/nf_tables_api.c return ++table->hgenerator; table 597 net/netfilter/nf_tables_api.c int family, const struct nft_table *table) table 612 net/netfilter/nf_tables_api.c if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) || table 613 net/netfilter/nf_tables_api.c nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) || table 614 net/netfilter/nf_tables_api.c 
nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) || table 615 net/netfilter/nf_tables_api.c nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle), table 641 net/netfilter/nf_tables_api.c event, 0, ctx->family, ctx->table); table 658 net/netfilter/nf_tables_api.c const struct nft_table *table; table 666 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(table, &net->nft.tables, list) { table 667 net/netfilter/nf_tables_api.c if (family != NFPROTO_UNSPEC && family != table->family) table 675 net/netfilter/nf_tables_api.c if (!nft_is_active(net, table)) table 681 net/netfilter/nf_tables_api.c table->family, table) < 0) table 719 net/netfilter/nf_tables_api.c const struct nft_table *table; table 733 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_TABLE_NAME], family, genmask); table 734 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 736 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 745 net/netfilter/nf_tables_api.c family, table); table 756 net/netfilter/nf_tables_api.c static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt) table 761 net/netfilter/nf_tables_api.c list_for_each_entry(chain, &table->chains, list) { table 774 net/netfilter/nf_tables_api.c static int nf_tables_table_enable(struct net *net, struct nft_table *table) table 779 net/netfilter/nf_tables_api.c list_for_each_entry(chain, &table->chains, list) { table 794 net/netfilter/nf_tables_api.c nft_table_disable(net, table, i); table 798 net/netfilter/nf_tables_api.c static void nf_tables_table_disable(struct net *net, struct nft_table *table) table 800 net/netfilter/nf_tables_api.c nft_table_disable(net, table, 0); table 816 net/netfilter/nf_tables_api.c if (flags == ctx->table->flags) table 825 net/netfilter/nf_tables_api.c !(ctx->table->flags & NFT_TABLE_F_DORMANT)) { table 828 net/netfilter/nf_tables_api.c ctx->table->flags & NFT_TABLE_F_DORMANT) { table 829 net/netfilter/nf_tables_api.c ret = nf_tables_table_enable(ctx->net, ctx->table); table 831 net/netfilter/nf_tables_api.c ctx->table->flags &= ~NFT_TABLE_F_DORMANT; table 873 net/netfilter/nf_tables_api.c seed ^= hash_ptr(k->table, 32); table 891 net/netfilter/nf_tables_api.c if (obj->key.table != k->table) table 906 net/netfilter/nf_tables_api.c struct nft_table *table; table 913 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, attr, family, genmask); table 914 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 915 net/netfilter/nf_tables_api.c if (PTR_ERR(table) != -ENOENT) table 916 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 925 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); table 936 net/netfilter/nf_tables_api.c table = kzalloc(sizeof(*table), GFP_KERNEL); table 937 net/netfilter/nf_tables_api.c if (table == NULL) table 940 net/netfilter/nf_tables_api.c table->name = nla_strdup(attr, GFP_KERNEL); table 941 net/netfilter/nf_tables_api.c if (table->name == NULL) table 944 net/netfilter/nf_tables_api.c err = rhltable_init(&table->chains_ht, &nft_chain_ht_params); table 948 net/netfilter/nf_tables_api.c INIT_LIST_HEAD(&table->chains); table 949 net/netfilter/nf_tables_api.c INIT_LIST_HEAD(&table->sets); table 950 net/netfilter/nf_tables_api.c INIT_LIST_HEAD(&table->objects); table 951 net/netfilter/nf_tables_api.c INIT_LIST_HEAD(&table->flowtables); table 952 net/netfilter/nf_tables_api.c table->family = family; table 953 net/netfilter/nf_tables_api.c table->flags = flags; table 954 net/netfilter/nf_tables_api.c 
table->handle = ++table_handle; table 956 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); table 961 net/netfilter/nf_tables_api.c list_add_tail_rcu(&table->list, &net->nft.tables); table 964 net/netfilter/nf_tables_api.c rhltable_destroy(&table->chains_ht); table 966 net/netfilter/nf_tables_api.c kfree(table->name); table 968 net/netfilter/nf_tables_api.c kfree(table); table 981 net/netfilter/nf_tables_api.c list_for_each_entry(chain, &ctx->table->chains, list) { table 992 net/netfilter/nf_tables_api.c list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { table 1005 net/netfilter/nf_tables_api.c list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) { table 1014 net/netfilter/nf_tables_api.c list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) { table 1023 net/netfilter/nf_tables_api.c list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { table 1041 net/netfilter/nf_tables_api.c struct nft_table *table, *nt; table 1045 net/netfilter/nf_tables_api.c list_for_each_entry_safe(table, nt, &ctx->net->nft.tables, list) { table 1046 net/netfilter/nf_tables_api.c if (family != AF_UNSPEC && table->family != family) table 1049 net/netfilter/nf_tables_api.c ctx->family = table->family; table 1051 net/netfilter/nf_tables_api.c if (!nft_is_active_next(ctx->net, table)) table 1055 net/netfilter/nf_tables_api.c nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0) table 1058 net/netfilter/nf_tables_api.c ctx->table = table; table 1077 net/netfilter/nf_tables_api.c struct nft_table *table; table 1087 net/netfilter/nf_tables_api.c table = nft_table_lookup_byhandle(net, attr, genmask); table 1090 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, attr, family, genmask); table 1093 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 1095 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 1099 net/netfilter/nf_tables_api.c table->use > 0) table 1103 net/netfilter/nf_tables_api.c ctx.table = table; table 1110 net/netfilter/nf_tables_api.c if (WARN_ON(ctx->table->use > 0)) table 1113 net/netfilter/nf_tables_api.c rhltable_destroy(&ctx->table->chains_ht); table 1114 net/netfilter/nf_tables_api.c kfree(ctx->table->name); table 1115 net/netfilter/nf_tables_api.c kfree(ctx->table); table 1143 net/netfilter/nf_tables_api.c nft_chain_lookup_byhandle(const struct nft_table *table, u64 handle, u8 genmask) table 1147 net/netfilter/nf_tables_api.c list_for_each_entry(chain, &table->chains, list) { table 1166 net/netfilter/nf_tables_api.c struct nft_table *table, table 1183 net/netfilter/nf_tables_api.c list = rhltable_lookup(&table->chains_ht, search, nft_chain_ht_params); table 1259 net/netfilter/nf_tables_api.c int family, const struct nft_table *table, table 1275 net/netfilter/nf_tables_api.c if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name)) table 1344 net/netfilter/nf_tables_api.c event, 0, ctx->family, ctx->table, table 1362 net/netfilter/nf_tables_api.c const struct nft_table *table; table 1371 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(table, &net->nft.tables, list) { table 1372 net/netfilter/nf_tables_api.c if (family != NFPROTO_UNSPEC && family != table->family) table 1375 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(chain, &table->chains, list) { table 1388 net/netfilter/nf_tables_api.c table->family, table, table 1412 net/netfilter/nf_tables_api.c struct nft_table *table; table 1426 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, 
genmask); table 1427 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 1429 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 1432 net/netfilter/nf_tables_api.c chain = nft_chain_lookup(net, table, nla[NFTA_CHAIN_NAME], genmask); table 1444 net/netfilter/nf_tables_api.c family, table, chain); table 1654 net/netfilter/nf_tables_api.c struct nft_table *table = ctx->table; table 1663 net/netfilter/nf_tables_api.c if (table->use == UINT_MAX) table 1720 net/netfilter/nf_tables_api.c chain->handle = nf_tables_alloc_handle(table); table 1721 net/netfilter/nf_tables_api.c chain->table = table; table 1738 net/netfilter/nf_tables_api.c err = nf_tables_register_hook(net, table, chain); table 1742 net/netfilter/nf_tables_api.c err = rhltable_insert_key(&table->chains_ht, chain->name, table 1750 net/netfilter/nf_tables_api.c rhltable_remove(&table->chains_ht, &chain->rhlhead, table 1759 net/netfilter/nf_tables_api.c table->use++; table 1760 net/netfilter/nf_tables_api.c list_add_tail_rcu(&chain->list, &table->chains); table 1764 net/netfilter/nf_tables_api.c nf_tables_unregister_hook(net, table, chain); table 1775 net/netfilter/nf_tables_api.c struct nft_table *table = ctx->table; table 1816 net/netfilter/nf_tables_api.c chain2 = nft_chain_lookup(ctx->net, table, table 1858 net/netfilter/nf_tables_api.c tmp->ctx.table == table && table 1887 net/netfilter/nf_tables_api.c struct nft_table *table; table 1896 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask); table 1897 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 1899 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 1907 net/netfilter/nf_tables_api.c chain = nft_chain_lookup_byhandle(table, handle, genmask); table 1914 net/netfilter/nf_tables_api.c chain = nft_chain_lookup(net, table, attr, genmask); table 1952 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); table 1978 net/netfilter/nf_tables_api.c struct nft_table *table; table 1986 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask); table 1987 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 1989 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 1995 net/netfilter/nf_tables_api.c chain = nft_chain_lookup_byhandle(table, handle, genmask); table 1998 net/netfilter/nf_tables_api.c chain = nft_chain_lookup(net, table, attr, genmask); table 2009 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); table 2343 net/netfilter/nf_tables_api.c const struct nft_table *table, table 2363 net/netfilter/nf_tables_api.c if (nla_put_string(skb, NFTA_RULE_TABLE, table->name)) table 2417 net/netfilter/nf_tables_api.c event, 0, ctx->family, ctx->table, table 2432 net/netfilter/nf_tables_api.c char *table; table 2439 net/netfilter/nf_tables_api.c const struct nft_table *table, table 2460 net/netfilter/nf_tables_api.c table->family, table 2461 net/netfilter/nf_tables_api.c table, chain, rule, prule) < 0) table 2478 net/netfilter/nf_tables_api.c struct nft_table *table; table 2487 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(table, &net->nft.tables, list) { table 2488 net/netfilter/nf_tables_api.c if (family != NFPROTO_UNSPEC && family != table->family) table 2491 net/netfilter/nf_tables_api.c if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) table 2494 net/netfilter/nf_tables_api.c if (ctx && ctx->table && ctx->chain) { table 2497 net/netfilter/nf_tables_api.c list = 
rhltable_lookup(&table->chains_ht, ctx->chain, table 2506 net/netfilter/nf_tables_api.c cb, table, chain); table 2512 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(chain, &table->chains, list) { table 2513 net/netfilter/nf_tables_api.c if (__nf_tables_dump_rules(skb, &idx, cb, table, chain)) table 2517 net/netfilter/nf_tables_api.c if (ctx && ctx->table) table 2538 net/netfilter/nf_tables_api.c ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], table 2540 net/netfilter/nf_tables_api.c if (!ctx->table) { table 2549 net/netfilter/nf_tables_api.c kfree(ctx->table); table 2565 net/netfilter/nf_tables_api.c kfree(ctx->table); table 2582 net/netfilter/nf_tables_api.c struct nft_table *table; table 2599 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask); table 2600 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 2602 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 2605 net/netfilter/nf_tables_api.c chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask); table 2623 net/netfilter/nf_tables_api.c family, table, chain, rule, NULL); table 2687 net/netfilter/nf_tables_api.c static int nft_table_validate(struct net *net, const struct nft_table *table) table 2692 net/netfilter/nf_tables_api.c .family = table->family, table 2696 net/netfilter/nf_tables_api.c list_for_each_entry(chain, &table->chains, list) { table 2724 net/netfilter/nf_tables_api.c struct nft_table *table; table 2738 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask); table 2739 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 2741 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 2744 net/netfilter/nf_tables_api.c chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask); table 2770 net/netfilter/nf_tables_api.c handle = nf_tables_alloc_handle(table); table 2791 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); table 2892 net/netfilter/nf_tables_api.c return nft_table_validate(net, table); table 2940 net/netfilter/nf_tables_api.c struct nft_table *table; table 2946 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask); table 2947 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 2949 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 2953 net/netfilter/nf_tables_api.c chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], table 2961 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); table 2984 net/netfilter/nf_tables_api.c list_for_each_entry(chain, &table->chains, list) { table 3142 net/netfilter/nf_tables_api.c struct nft_table *table = NULL; table 3145 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, table 3147 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 3149 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 3153 net/netfilter/nf_tables_api.c nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla); table 3157 net/netfilter/nf_tables_api.c static struct nft_set *nft_set_lookup(const struct nft_table *table, table 3165 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(set, &table->sets, list) { table 3173 net/netfilter/nf_tables_api.c static struct nft_set *nft_set_lookup_byhandle(const struct nft_table *table, table 3179 net/netfilter/nf_tables_api.c list_for_each_entry(set, &table->sets, list) { table 3206 net/netfilter/nf_tables_api.c const struct nft_table *table, 
table 3213 net/netfilter/nf_tables_api.c set = nft_set_lookup(table, nla_set_name, genmask); table 3241 net/netfilter/nf_tables_api.c list_for_each_entry(i, &ctx->table->sets, list) { table 3267 net/netfilter/nf_tables_api.c list_for_each_entry(i, &ctx->table->sets, list) { table 3317 net/netfilter/nf_tables_api.c if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name)) table 3408 net/netfilter/nf_tables_api.c struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; table 3418 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(table, &net->nft.tables, list) { table 3420 net/netfilter/nf_tables_api.c ctx->family != table->family) table 3423 net/netfilter/nf_tables_api.c if (ctx->table && ctx->table != table) table 3427 net/netfilter/nf_tables_api.c if (cur_table != table) table 3433 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(set, &table->sets, list) { table 3440 net/netfilter/nf_tables_api.c ctx_set.table = table; table 3441 net/netfilter/nf_tables_api.c ctx_set.family = table->family; table 3447 net/netfilter/nf_tables_api.c cb->args[2] = (unsigned long) table; table 3518 net/netfilter/nf_tables_api.c set = nft_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask); table 3563 net/netfilter/nf_tables_api.c struct nft_table *table; table 3671 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, genmask); table 3672 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 3674 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 3677 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); table 3679 net/netfilter/nf_tables_api.c set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask); table 3735 net/netfilter/nf_tables_api.c set->table = table; table 3750 net/netfilter/nf_tables_api.c set->handle = nf_tables_alloc_handle(table); table 3760 net/netfilter/nf_tables_api.c list_add_tail_rcu(&set->list, &table->sets); table 3761 net/netfilter/nf_tables_api.c table->use++; table 3810 net/netfilter/nf_tables_api.c set = nft_set_lookup_byhandle(ctx.table, attr, genmask); table 3813 net/netfilter/nf_tables_api.c set = nft_set_lookup(ctx.table, attr, genmask); table 3993 net/netfilter/nf_tables_api.c struct nft_table *table; table 3995 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family, table 3997 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 3999 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 4002 net/netfilter/nf_tables_api.c nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla); table 4106 net/netfilter/nf_tables_api.c struct nft_table *table; table 4117 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(table, &net->nft.tables, list) { table 4119 net/netfilter/nf_tables_api.c dump_ctx->ctx.family != table->family) table 4122 net/netfilter/nf_tables_api.c if (table != dump_ctx->ctx.table) table 4125 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(set, &table->sets, list) { table 4149 net/netfilter/nf_tables_api.c nfmsg->nfgen_family = table->family; table 4153 net/netfilter/nf_tables_api.c if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name)) table 4225 net/netfilter/nf_tables_api.c if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name)) table 4344 net/netfilter/nf_tables_api.c set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask); table 4456 net/netfilter/nf_tables_api.c .family = set->table->family, table 4588 net/netfilter/nf_tables_api.c obj = nft_obj_lookup(ctx->net, ctx->table, 
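
The nf_tables_newset() entries show the attach step that chains, sets, objects and flowtables all share: the new object records its owning table, takes a fresh handle from nf_tables_alloc_handle() (which simply returns ++table->hgenerator), is linked RCU-safely onto the table's per-kind list, and bumps table->use so the table cannot be removed underneath it; the delete and abort paths indexed earlier decrement ctx->table->use again. For a set, condensed from the indexed lines:

	set->table  = table;
	set->handle = nf_tables_alloc_handle(table);	/* ++table->hgenerator */
	list_add_tail_rcu(&set->list, &table->sets);
	table->use++;					/* undone on deletion */
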
table 4613 net/netfilter/nf_tables_api.c .table = ctx->table, table 4742 net/netfilter/nf_tables_api.c set = nft_set_lookup_global(net, ctx.table, nla[NFTA_SET_ELEM_LIST_SET], table 4757 net/netfilter/nf_tables_api.c return nft_table_validate(net, ctx.table); table 4936 net/netfilter/nf_tables_api.c set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask); table 5025 net/netfilter/nf_tables_api.c const struct nft_table *table, table 5029 net/netfilter/nf_tables_api.c struct nft_object_hash_key k = { .table = table }; table 5058 net/netfilter/nf_tables_api.c static struct nft_object *nft_obj_lookup_byhandle(const struct nft_table *table, table 5064 net/netfilter/nf_tables_api.c list_for_each_entry(obj, &table->objects, list) { table 5224 net/netfilter/nf_tables_api.c struct nft_table *table; table 5235 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask); table 5236 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 5238 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 5242 net/netfilter/nf_tables_api.c obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask); table 5258 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); table 5263 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); table 5274 net/netfilter/nf_tables_api.c obj->key.table = table; table 5275 net/netfilter/nf_tables_api.c obj->handle = nf_tables_alloc_handle(table); table 5292 net/netfilter/nf_tables_api.c list_add_tail_rcu(&obj->list, &table->objects); table 5293 net/netfilter/nf_tables_api.c table->use++; table 5312 net/netfilter/nf_tables_api.c int family, const struct nft_table *table, table 5328 net/netfilter/nf_tables_api.c if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) || table 5346 net/netfilter/nf_tables_api.c char *table; table 5353 net/netfilter/nf_tables_api.c const struct nft_table *table; table 5367 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(table, &net->nft.tables, list) { table 5368 net/netfilter/nf_tables_api.c if (family != NFPROTO_UNSPEC && family != table->family) table 5371 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(obj, &table->objects, list) { table 5379 net/netfilter/nf_tables_api.c if (filter && filter->table && table 5380 net/netfilter/nf_tables_api.c strcmp(filter->table, table->name)) table 5391 net/netfilter/nf_tables_api.c table->family, table, table 5418 net/netfilter/nf_tables_api.c filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); table 5419 net/netfilter/nf_tables_api.c if (!filter->table) { table 5438 net/netfilter/nf_tables_api.c kfree(filter->table); table 5454 net/netfilter/nf_tables_api.c const struct nft_table *table; table 5477 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask); table 5478 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 5480 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 5484 net/netfilter/nf_tables_api.c obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask); table 5499 net/netfilter/nf_tables_api.c family, table, obj, reset); table 5528 net/netfilter/nf_tables_api.c struct nft_table *table; table 5537 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask); table 5538 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 5540 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 5546 net/netfilter/nf_tables_api.c obj = 
nft_obj_lookup_byhandle(table, attr, objtype, genmask); table 5549 net/netfilter/nf_tables_api.c obj = nft_obj_lookup(net, table, attr, objtype, genmask); table 5561 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); table 5566 net/netfilter/nf_tables_api.c void nft_obj_notify(struct net *net, const struct nft_table *table, table 5582 net/netfilter/nf_tables_api.c table, obj, false); table 5598 net/netfilter/nf_tables_api.c nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event, table 5630 net/netfilter/nf_tables_api.c struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table, table 5635 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(flowtable, &table->flowtables, list) { table 5661 net/netfilter/nf_tables_api.c nft_flowtable_lookup_byhandle(const struct nft_table *table, table 5666 net/netfilter/nf_tables_api.c list_for_each_entry(flowtable, &table->flowtables, list) { table 5823 net/netfilter/nf_tables_api.c struct nft_table *table; table 5832 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family, table 5834 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 5836 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 5839 net/netfilter/nf_tables_api.c flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME], table 5856 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); table 5862 net/netfilter/nf_tables_api.c flowtable->table = table; table 5863 net/netfilter/nf_tables_api.c flowtable->handle = nf_tables_alloc_handle(table); table 5891 net/netfilter/nf_tables_api.c list_for_each_entry(ft, &table->flowtables, list) { table 5913 net/netfilter/nf_tables_api.c list_add_tail_rcu(&flowtable->list, &table->flowtables); table 5914 net/netfilter/nf_tables_api.c table->use++; table 5946 net/netfilter/nf_tables_api.c struct nft_table *table; table 5954 net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family, table 5956 net/netfilter/nf_tables_api.c if (IS_ERR(table)) { table 5958 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 5963 net/netfilter/nf_tables_api.c flowtable = nft_flowtable_lookup_byhandle(table, attr, genmask); table 5966 net/netfilter/nf_tables_api.c flowtable = nft_flowtable_lookup(table, attr, genmask); table 5978 net/netfilter/nf_tables_api.c nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); table 6003 net/netfilter/nf_tables_api.c if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) || table 6040 net/netfilter/nf_tables_api.c char *table; table 6052 net/netfilter/nf_tables_api.c const struct nft_table *table; table 6057 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(table, &net->nft.tables, list) { table 6058 net/netfilter/nf_tables_api.c if (family != NFPROTO_UNSPEC && family != table->family) table 6061 net/netfilter/nf_tables_api.c list_for_each_entry_rcu(flowtable, &table->flowtables, list) { table 6069 net/netfilter/nf_tables_api.c if (filter && filter->table && table 6070 net/netfilter/nf_tables_api.c strcmp(filter->table, table->name)) table 6077 net/netfilter/nf_tables_api.c table->family, flowtable) < 0) table 6102 net/netfilter/nf_tables_api.c filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], table 6104 net/netfilter/nf_tables_api.c if (!filter->table) { table 6121 net/netfilter/nf_tables_api.c kfree(filter->table); table 6138 net/netfilter/nf_tables_api.c const struct nft_table *table; table 6157 
net/netfilter/nf_tables_api.c table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family, table 6159 net/netfilter/nf_tables_api.c if (IS_ERR(table)) table 6160 net/netfilter/nf_tables_api.c return PTR_ERR(table); table 6162 net/netfilter/nf_tables_api.c flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME], table 6273 net/netfilter/nf_tables_api.c struct nft_table *table; table 6281 net/netfilter/nf_tables_api.c list_for_each_entry(table, &net->nft.tables, list) { table 6282 net/netfilter/nf_tables_api.c list_for_each_entry(flowtable, &table->flowtables, list) { table 6466 net/netfilter/nf_tables_api.c struct nft_table *table; table 6475 net/netfilter/nf_tables_api.c list_for_each_entry(table, &net->nft.tables, list) { table 6476 net/netfilter/nf_tables_api.c if (nft_table_validate(net, table) < 0) table 6511 net/netfilter/nf_tables_api.c rhltable_remove(&trans->ctx.table->chains_ht, table 6515 net/netfilter/nf_tables_api.c rhltable_insert_key(&trans->ctx.table->chains_ht, table 6732 net/netfilter/nf_tables_api.c struct nft_table *table = chain->table; table 6734 net/netfilter/nf_tables_api.c WARN_ON_ONCE(rhltable_remove(&table->chains_ht, &chain->rhlhead, table 6789 net/netfilter/nf_tables_api.c struct nft_table *table; table 6822 net/netfilter/nf_tables_api.c list_for_each_entry(table, &net->nft.tables, list) { table 6823 net/netfilter/nf_tables_api.c list_for_each_entry(chain, &table->chains, list) table 6842 net/netfilter/nf_tables_api.c trans->ctx.table); table 6843 net/netfilter/nf_tables_api.c trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; table 6846 net/netfilter/nf_tables_api.c nft_clear(net, trans->ctx.table); table 6852 net/netfilter/nf_tables_api.c list_del_rcu(&trans->ctx.table->list); table 6871 net/netfilter/nf_tables_api.c trans->ctx.table, table 6897 net/netfilter/nf_tables_api.c trans->ctx.table->use--; table 7026 net/netfilter/nf_tables_api.c trans->ctx.table); table 7027 net/netfilter/nf_tables_api.c trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; table 7031 net/netfilter/nf_tables_api.c list_del_rcu(&trans->ctx.table->list); table 7035 net/netfilter/nf_tables_api.c nft_clear(trans->ctx.net, trans->ctx.table); table 7044 net/netfilter/nf_tables_api.c trans->ctx.table->use--; table 7047 net/netfilter/nf_tables_api.c trans->ctx.table, table 7052 net/netfilter/nf_tables_api.c trans->ctx.table->use++; table 7070 net/netfilter/nf_tables_api.c trans->ctx.table->use--; table 7078 net/netfilter/nf_tables_api.c trans->ctx.table->use++; table 7105 net/netfilter/nf_tables_api.c trans->ctx.table->use--; table 7110 net/netfilter/nf_tables_api.c trans->ctx.table->use++; table 7115 net/netfilter/nf_tables_api.c trans->ctx.table->use--; table 7121 net/netfilter/nf_tables_api.c trans->ctx.table->use++; table 7289 net/netfilter/nf_tables_api.c list_for_each_entry(set, &ctx->table->sets, list) { table 7501 net/netfilter/nf_tables_api.c chain = nft_chain_lookup(ctx->net, ctx->table, table 7674 net/netfilter/nf_tables_api.c nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain); table 7681 net/netfilter/nf_tables_api.c ctx->table->use--; table 7691 net/netfilter/nf_tables_api.c struct nft_table *table, *nt; table 7701 net/netfilter/nf_tables_api.c list_for_each_entry_safe(table, nt, &net->nft.tables, list) { table 7702 net/netfilter/nf_tables_api.c ctx.family = table->family; table 7704 net/netfilter/nf_tables_api.c list_for_each_entry(chain, &table->chains, list) table 7705 net/netfilter/nf_tables_api.c nf_tables_unregister_hook(net, table, chain); table 7707 
net/netfilter/nf_tables_api.c ctx.table = table; table 7708 net/netfilter/nf_tables_api.c list_for_each_entry(chain, &table->chains, list) { table 7716 net/netfilter/nf_tables_api.c list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) { table 7718 net/netfilter/nf_tables_api.c table->use--; table 7721 net/netfilter/nf_tables_api.c list_for_each_entry_safe(set, ns, &table->sets, list) { table 7723 net/netfilter/nf_tables_api.c table->use--; table 7726 net/netfilter/nf_tables_api.c list_for_each_entry_safe(obj, ne, &table->objects, list) { table 7728 net/netfilter/nf_tables_api.c table->use--; table 7731 net/netfilter/nf_tables_api.c list_for_each_entry_safe(chain, nc, &table->chains, list) { table 7734 net/netfilter/nf_tables_api.c table->use--; table 7737 net/netfilter/nf_tables_api.c list_del(&table->list); table 408 net/netfilter/nf_tables_offload.c const struct nft_table *table; table 411 net/netfilter/nf_tables_offload.c list_for_each_entry(table, &net->nft.tables, list) { table 412 net/netfilter/nf_tables_offload.c if (table->family != NFPROTO_NETDEV) table 415 net/netfilter/nf_tables_offload.c list_for_each_entry(chain, &table->chains, list) { table 196 net/netfilter/nf_tables_trace.c nla_total_size(strlen(info->chain->table->name)) + table 243 net/netfilter/nf_tables_trace.c if (nla_put_string(skb, NFTA_TRACE_TABLE, info->chain->table->name)) table 38 net/netfilter/nfnetlink.c rcu_dereference_protected(table[(id)].subsys, \ table 46 net/netfilter/nfnetlink.c } table[NFNL_SUBSYS_COUNT]; table 62 net/netfilter/nfnetlink.c mutex_lock(&table[subsys_id].mutex); table 68 net/netfilter/nfnetlink.c mutex_unlock(&table[subsys_id].mutex); table 75 net/netfilter/nfnetlink.c return lockdep_is_held(&table[subsys_id].mutex); table 90 net/netfilter/nfnetlink.c if (table[n->subsys_id].subsys) { table 94 net/netfilter/nfnetlink.c rcu_assign_pointer(table[n->subsys_id].subsys, n); table 104 net/netfilter/nfnetlink.c table[n->subsys_id].subsys = NULL; table 118 net/netfilter/nfnetlink.c return rcu_dereference(table[subsys_id].subsys); table 629 net/netfilter/nfnetlink.c mutex_init(&table[i].mutex); table 318 net/netfilter/nft_chain_filter.c struct nft_table *table; table 329 net/netfilter/nft_chain_filter.c list_for_each_entry(table, &ctx.net->nft.tables, list) { table 330 net/netfilter/nft_chain_filter.c if (table->family != NFPROTO_NETDEV) table 333 net/netfilter/nft_chain_filter.c ctx.family = table->family; table 334 net/netfilter/nft_chain_filter.c ctx.table = table; table 335 net/netfilter/nft_chain_filter.c list_for_each_entry_safe(chain, nr, &table->chains, list) { table 141 net/netfilter/nft_compat.c par->table = ctx->table->name; table 321 net/netfilter/nft_compat.c ret = nft_compat_chain_validate_dependency(ctx, target->table); table 385 net/netfilter/nft_compat.c par->table = ctx->table->name; table 562 net/netfilter/nft_compat.c ret = nft_compat_chain_validate_dependency(ctx, match->table); table 154 net/netfilter/nft_dynset.c set = nft_set_lookup_global(ctx->net, ctx->table, table 169 net/netfilter/nft_flow_offload.c flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME], table 71 net/netfilter/nft_lookup.c set = nft_set_lookup_global(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET], table 38 net/netfilter/nft_objref.c obj = nft_obj_lookup(ctx->net, ctx->table, table 131 net/netfilter/nft_objref.c set = nft_set_lookup_global(ctx->net, ctx->table, table 62 net/netfilter/nft_quota.c nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0, table 414 
net/netfilter/nft_set_hash.c struct hlist_head table[]; table 432 net/netfilter/nft_set_hash.c hlist_for_each_entry_rcu(he, &priv->table[hash], node) { table 452 net/netfilter/nft_set_hash.c hlist_for_each_entry_rcu(he, &priv->table[hash], node) { table 472 net/netfilter/nft_set_hash.c hlist_for_each_entry_rcu(he, &priv->table[hash], node) { table 510 net/netfilter/nft_set_hash.c hlist_for_each_entry(he, &priv->table[hash], node) { table 518 net/netfilter/nft_set_hash.c hlist_add_head_rcu(&this->node, &priv->table[hash]); table 549 net/netfilter/nft_set_hash.c hlist_for_each_entry(he, &priv->table[hash], node) { table 578 net/netfilter/nft_set_hash.c hlist_for_each_entry_rcu(he, &priv->table[i], node) { table 622 net/netfilter/nft_set_hash.c hlist_for_each_entry_safe(he, next, &priv->table[i], node) { table 475 net/netfilter/x_tables.c if (par->match->table != NULL && table 476 net/netfilter/x_tables.c strcmp(par->match->table, par->table) != 0) { table 479 net/netfilter/x_tables.c par->match->table, par->table); table 993 net/netfilter/x_tables.c if (par->target->table != NULL && table 994 net/netfilter/x_tables.c strcmp(par->target->table, par->table) != 0) { table 997 net/netfilter/x_tables.c par->target->table, par->table); table 1268 net/netfilter/x_tables.c void xt_table_unlock(struct xt_table *table) table 1270 net/netfilter/x_tables.c mutex_unlock(&xt[table->af].mutex); table 1353 net/netfilter/x_tables.c xt_replace_table(struct xt_table *table, table 1370 net/netfilter/x_tables.c private = table->private; table 1387 net/netfilter/x_tables.c table->private = newinfo; table 1416 net/netfilter/x_tables.c table->name, table->af, private->number); table 1431 net/netfilter/x_tables.c struct xt_table *t, *table; table 1434 net/netfilter/x_tables.c table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); table 1435 net/netfilter/x_tables.c if (!table) { table 1440 net/netfilter/x_tables.c mutex_lock(&xt[table->af].mutex); table 1442 net/netfilter/x_tables.c list_for_each_entry(t, &net->xt.tables[table->af], list) { table 1443 net/netfilter/x_tables.c if (strcmp(t->name, table->name) == 0) { table 1450 net/netfilter/x_tables.c table->private = bootstrap; table 1452 net/netfilter/x_tables.c if (!xt_replace_table(table, 0, newinfo, &ret)) table 1455 net/netfilter/x_tables.c private = table->private; table 1461 net/netfilter/x_tables.c list_add(&table->list, &net->xt.tables[table->af]); table 1462 net/netfilter/x_tables.c mutex_unlock(&xt[table->af].mutex); table 1463 net/netfilter/x_tables.c return table; table 1466 net/netfilter/x_tables.c mutex_unlock(&xt[table->af].mutex); table 1467 net/netfilter/x_tables.c kfree(table); table 1473 net/netfilter/x_tables.c void *xt_unregister_table(struct xt_table *table) table 1477 net/netfilter/x_tables.c mutex_lock(&xt[table->af].mutex); table 1478 net/netfilter/x_tables.c private = table->private; table 1479 net/netfilter/x_tables.c list_del(&table->list); table 1480 net/netfilter/x_tables.c mutex_unlock(&xt[table->af].mutex); table 1481 net/netfilter/x_tables.c kfree(table); table 1514 net/netfilter/x_tables.c struct xt_table *table = list_entry(v, struct xt_table, list); table 1516 net/netfilter/x_tables.c if (*table->name) table 1517 net/netfilter/x_tables.c seq_printf(seq, "%s\n", table->name); table 1696 net/netfilter/x_tables.c xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn) table 1698 net/netfilter/x_tables.c unsigned int hook_mask = table->valid_hooks; table 1715 net/netfilter/x_tables.c ops[i].pf = table->af; table 
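
The x_tables.c entries outline how xt_register_table() publishes a table: the caller's template is kmemdup()'ed, the per-family mutex xt[af].mutex is taken, net->xt.tables[af] is scanned for a duplicate name, a bootstrap ruleset is installed through xt_replace_table(), and only then is the copy linked into the list; xt_unregister_table() reverses this under the same mutex. In outline, reassembled from the indexed lines with error unwinding and return values trimmed:

	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	mutex_lock(&xt[table->af].mutex);
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0)
			goto unlock;			/* name already registered */
	}
	table->private = bootstrap;			/* minimal ruleset */
	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;
	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;
unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
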
1717 net/netfilter/x_tables.c ops[i].priority = table->priority; table 71 net/netfilter/xt_CHECKSUM.c .table = "mangle", table 88 net/netfilter/xt_CONNSECMARK.c if (strcmp(par->table, "mangle") != 0 && table 89 net/netfilter/xt_CONNSECMARK.c strcmp(par->table, "security") != 0) { table 91 net/netfilter/xt_CONNSECMARK.c par->table); table 314 net/netfilter/xt_CT.c .table = "raw", table 326 net/netfilter/xt_CT.c .table = "raw", table 338 net/netfilter/xt_CT.c .table = "raw", table 371 net/netfilter/xt_CT.c .table = "raw", table 118 net/netfilter/xt_DSCP.c .table = "mangle", table 127 net/netfilter/xt_DSCP.c .table = "mangle", table 134 net/netfilter/xt_DSCP.c .table = "mangle", table 143 net/netfilter/xt_DSCP.c .table = "mangle", table 130 net/netfilter/xt_HL.c .table = "mangle", table 140 net/netfilter/xt_HL.c .table = "mangle", table 79 net/netfilter/xt_MASQUERADE.c .table = "nat", table 90 net/netfilter/xt_MASQUERADE.c .table = "nat", table 126 net/netfilter/xt_NETMAP.c .table = "nat", table 141 net/netfilter/xt_NETMAP.c .table = "nat", table 76 net/netfilter/xt_REDIRECT.c .table = "nat", table 89 net/netfilter/xt_REDIRECT.c .table = "nat", table 83 net/netfilter/xt_SECMARK.c if (strcmp(par->table, "mangle") != 0 && table 84 net/netfilter/xt_SECMARK.c strcmp(par->table, "security") != 0) { table 86 net/netfilter/xt_SECMARK.c par->table); table 116 net/netfilter/xt_TCPOPTSTRIP.c .table = "mangle", table 126 net/netfilter/xt_TCPOPTSTRIP.c .table = "mangle", table 226 net/netfilter/xt_TPROXY.c .table = "mangle", table 237 net/netfilter/xt_TPROXY.c .table = "mangle", table 249 net/netfilter/xt_TPROXY.c .table = "mangle", table 36 net/netfilter/xt_TRACE.c .table = "raw", table 159 net/netfilter/xt_nat.c .table = "nat", table 172 net/netfilter/xt_nat.c .table = "nat", table 184 net/netfilter/xt_nat.c .table = "nat", table 196 net/netfilter/xt_nat.c .table = "nat", table 208 net/netfilter/xt_nat.c .table = "nat", table 220 net/netfilter/xt_nat.c .table = "nat", table 124 net/netfilter/xt_recent.c recent_entry_lookup(const struct recent_table *table, table 136 net/netfilter/xt_recent.c list_for_each_entry(e, &table->iphash[h], list) table 467 net/netfilter/xt_recent.c const struct recent_table *table; table 475 net/netfilter/xt_recent.c const struct recent_table *t = st->table; table 491 net/netfilter/xt_recent.c const struct recent_table *t = st->table; table 514 net/netfilter/xt_recent.c const struct recent_table *t = st->table; table 546 net/netfilter/xt_recent.c st->table = PDE_DATA(inode); table 462 net/netfilter/xt_set.c if (strncmp(par->table, "mangle", 7)) { table 497 net/netlink/af_netlink.c static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid, table 503 net/netlink/af_netlink.c return rhashtable_lookup_fast(&table->hash, &arg, table 507 net/netlink/af_netlink.c static int __netlink_insert(struct netlink_table *table, struct sock *sk) table 512 net/netlink/af_netlink.c return rhashtable_lookup_insert_key(&table->hash, &arg, table 519 net/netlink/af_netlink.c struct netlink_table *table = &nl_table[protocol]; table 523 net/netlink/af_netlink.c sk = __netlink_lookup(table, portid, net); table 559 net/netlink/af_netlink.c struct netlink_table *table = &nl_table[sk->sk_protocol]; table 571 net/netlink/af_netlink.c err = __netlink_insert(table, sk); table 595 net/netlink/af_netlink.c struct netlink_table *table; table 597 net/netlink/af_netlink.c table = &nl_table[sk->sk_protocol]; table 598 net/netlink/af_netlink.c if (!rhashtable_remove_fast(&table->hash, 
&nlk_sk(sk)->node, table 809 net/netlink/af_netlink.c struct netlink_table *table = &nl_table[sk->sk_protocol]; table 818 net/netlink/af_netlink.c ok = !__netlink_lookup(table, portid, net); table 162 net/openvswitch/datapath.c ovs_flow_tbl_destroy(&dp->table); table 230 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit); table 661 net/openvswitch/datapath.c stats->n_flows = ovs_flow_tbl_count(&dp->table); table 662 net/openvswitch/datapath.c mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table); table 974 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id); table 976 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key); table 981 net/openvswitch/datapath.c error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask); table 1016 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup_exact(&dp->table, table 1182 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid); table 1184 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); table 1274 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid); table 1276 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); table 1329 net/openvswitch/datapath.c err = ovs_flow_tbl_flush(&dp->table); table 1334 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid); table 1336 net/openvswitch/datapath.c flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); table 1342 net/openvswitch/datapath.c ovs_flow_tbl_remove(&dp->table, flow); table 1397 net/openvswitch/datapath.c ti = rcu_dereference(dp->table.ti); table 1615 net/openvswitch/datapath.c err = ovs_flow_tbl_init(&dp->table); table 1693 net/openvswitch/datapath.c ovs_flow_tbl_destroy(&dp->table); table 69 net/openvswitch/datapath.h struct flow_table table; table 96 net/openvswitch/flow_table.c int ovs_flow_tbl_count(const struct flow_table *table) table 98 net/openvswitch/flow_table.c return table->count; table 167 net/openvswitch/flow_table.c int ovs_flow_tbl_init(struct flow_table *table) table 180 net/openvswitch/flow_table.c rcu_assign_pointer(table->ti, ti); table 181 net/openvswitch/flow_table.c rcu_assign_pointer(table->ufid_ti, ufid_ti); table 182 net/openvswitch/flow_table.c INIT_LIST_HEAD(&table->mask_list); table 183 net/openvswitch/flow_table.c table->last_rehash = jiffies; table 184 net/openvswitch/flow_table.c table->count = 0; table 185 net/openvswitch/flow_table.c table->ufid_count = 0; table 241 net/openvswitch/flow_table.c void ovs_flow_tbl_destroy(struct flow_table *table) table 243 net/openvswitch/flow_table.c struct table_instance *ti = rcu_dereference_raw(table->ti); table 244 net/openvswitch/flow_table.c struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti); table 529 net/openvswitch/flow_table.c int ovs_flow_tbl_num_masks(const struct flow_table *table) table 534 net/openvswitch/flow_table.c list_for_each_entry(mask, &table->mask_list, list) table 565 net/openvswitch/flow_table.c void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) table 567 net/openvswitch/flow_table.c struct table_instance *ti = ovsl_dereference(table->ti); table 568 net/openvswitch/flow_table.c struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti); table 570 net/openvswitch/flow_table.c BUG_ON(table->count == 0); table 572 net/openvswitch/flow_table.c table->count--; table 575 net/openvswitch/flow_table.c table->ufid_count--; table 581 
net/openvswitch/flow_table.c flow_mask_remove(table, flow->mask); table 645 net/openvswitch/flow_table.c static void flow_key_insert(struct flow_table *table, struct sw_flow *flow) table 651 net/openvswitch/flow_table.c ti = ovsl_dereference(table->ti); table 653 net/openvswitch/flow_table.c table->count++; table 656 net/openvswitch/flow_table.c if (table->count > ti->n_buckets) table 658 net/openvswitch/flow_table.c else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL)) table 662 net/openvswitch/flow_table.c rcu_assign_pointer(table->ti, new_ti); table 664 net/openvswitch/flow_table.c table->last_rehash = jiffies; table 669 net/openvswitch/flow_table.c static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow) table 674 net/openvswitch/flow_table.c ti = ovsl_dereference(table->ufid_ti); table 676 net/openvswitch/flow_table.c table->ufid_count++; table 679 net/openvswitch/flow_table.c if (table->ufid_count > ti->n_buckets) { table 684 net/openvswitch/flow_table.c rcu_assign_pointer(table->ufid_ti, new_ti); table 691 net/openvswitch/flow_table.c int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, table 696 net/openvswitch/flow_table.c err = flow_mask_insert(table, flow, mask); table 699 net/openvswitch/flow_table.c flow_key_insert(table, flow); table 701 net/openvswitch/flow_table.c flow_ufid_insert(table, flow); table 52 net/openvswitch/flow_table.h int ovs_flow_tbl_count(const struct flow_table *table); table 53 net/openvswitch/flow_table.h void ovs_flow_tbl_destroy(struct flow_table *table); table 56 net/openvswitch/flow_table.h int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, table 58 net/openvswitch/flow_table.h void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); table 59 net/openvswitch/flow_table.h int ovs_flow_tbl_num_masks(const struct flow_table *table); table 60 net/openvswitch/flow_table.h struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, table 26 net/phonet/pn_dev.c struct net_device __rcu *table[64]; table 265 net/phonet/pn_dev.c if (rcu_access_pointer(pnn->routes.table[i]) == dev) { table 266 net/phonet/pn_dev.c RCU_INIT_POINTER(pnn->routes.table[i], NULL); table 367 net/phonet/pn_dev.c if (routes->table[daddr] == NULL) { table 368 net/phonet/pn_dev.c rcu_assign_pointer(routes->table[daddr], dev); table 383 net/phonet/pn_dev.c if (rcu_access_pointer(routes->table[daddr]) == dev) table 384 net/phonet/pn_dev.c RCU_INIT_POINTER(routes->table[daddr], NULL); table 403 net/phonet/pn_dev.c dev = rcu_dereference(routes->table[daddr]); table 415 net/phonet/pn_dev.c dev = rcu_dereference(routes->table[daddr]); table 51 net/phonet/sysctl.c static int proc_local_port_range(struct ctl_table *table, int write, table 60 net/phonet/sysctl.c .mode = table->mode, table 34 net/sched/act_ipt.c char *table, unsigned int hook) table 49 net/sched/act_ipt.c par.table = table; table 36 net/sched/cls_route.c struct route4_bucket __rcu *table[256 + 1]; table 162 net/sched/cls_route.c b = rcu_dereference_bh(head->table[h]); table 230 net/sched/cls_route.c b = rtnl_dereference(head->table[h1]); table 287 net/sched/cls_route.c b = rtnl_dereference(head->table[h1]); table 304 net/sched/cls_route.c RCU_INIT_POINTER(head->table[h1], NULL); table 356 net/sched/cls_route.c RCU_INIT_POINTER(head->table[to_hash(h)], NULL); table 365 net/sched/cls_route.c if (rcu_access_pointer(head->table[h1])) { table 428 net/sched/cls_route.c b = rtnl_dereference(head->table[h1]); table 434 net/sched/cls_route.c 
rcu_assign_pointer(head->table[h1], b); table 532 net/sched/cls_route.c b = rtnl_dereference(head->table[th]); table 571 net/sched/cls_route.c struct route4_bucket *b = rtnl_dereference(head->table[h]); table 51 net/sched/em_ipt.c mtpar.table = "filter"; table 61 net/sched/sch_gred.c static inline int gred_wred_mode(struct gred_sched *table) table 63 net/sched/sch_gred.c return test_bit(GRED_WRED_MODE, &table->flags); table 66 net/sched/sch_gred.c static inline void gred_enable_wred_mode(struct gred_sched *table) table 68 net/sched/sch_gred.c __set_bit(GRED_WRED_MODE, &table->flags); table 71 net/sched/sch_gred.c static inline void gred_disable_wred_mode(struct gred_sched *table) table 73 net/sched/sch_gred.c __clear_bit(GRED_WRED_MODE, &table->flags); table 76 net/sched/sch_gred.c static inline int gred_rio_mode(struct gred_sched *table) table 78 net/sched/sch_gred.c return test_bit(GRED_RIO_MODE, &table->flags); table 81 net/sched/sch_gred.c static inline void gred_enable_rio_mode(struct gred_sched *table) table 83 net/sched/sch_gred.c __set_bit(GRED_RIO_MODE, &table->flags); table 86 net/sched/sch_gred.c static inline void gred_disable_rio_mode(struct gred_sched *table) table 88 net/sched/sch_gred.c __clear_bit(GRED_RIO_MODE, &table->flags); table 93 net/sched/sch_gred.c struct gred_sched *table = qdisc_priv(sch); table 97 net/sched/sch_gred.c for (i = 0; i < table->DPs; i++) { table 98 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i]; table 104 net/sched/sch_gred.c for (n = i + 1; n < table->DPs; n++) table 105 net/sched/sch_gred.c if (table->tab[n] && table->tab[n]->prio == q->prio) table 112 net/sched/sch_gred.c static inline unsigned int gred_backlog(struct gred_sched *table, table 116 net/sched/sch_gred.c if (gred_wred_mode(table)) table 127 net/sched/sch_gred.c static inline void gred_load_wred_set(const struct gred_sched *table, table 130 net/sched/sch_gred.c q->vars.qavg = table->wred_set.qavg; table 131 net/sched/sch_gred.c q->vars.qidlestart = table->wred_set.qidlestart; table 134 net/sched/sch_gred.c static inline void gred_store_wred_set(struct gred_sched *table, table 137 net/sched/sch_gred.c table->wred_set.qavg = q->vars.qavg; table 138 net/sched/sch_gred.c table->wred_set.qidlestart = q->vars.qidlestart; table 151 net/sched/sch_gred.c static bool gred_per_vq_red_flags_used(struct gred_sched *table) table 156 net/sched/sch_gred.c if (table->red_flags) table 159 net/sched/sch_gred.c if (table->tab[i] && table->tab[i]->red_flags) table 312 net/sched/sch_gred.c struct gred_sched *table = qdisc_priv(sch); table 326 net/sched/sch_gred.c opt.set.grio_on = gred_rio_mode(table); table 327 net/sched/sch_gred.c opt.set.wred_on = gred_wred_mode(table); table 328 net/sched/sch_gred.c opt.set.dp_cnt = table->DPs; table 329 net/sched/sch_gred.c opt.set.dp_def = table->def; table 331 net/sched/sch_gred.c for (i = 0; i < table->DPs; i++) { table 332 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i]; table 354 net/sched/sch_gred.c struct gred_sched *table = qdisc_priv(sch); table 368 net/sched/sch_gred.c if (table->tab[i]) table 369 net/sched/sch_gred.c hw_stats->stats.xstats[i] = &table->tab[i]->stats; table 376 net/sched/sch_gred.c if (!table->tab[i]) table 378 net/sched/sch_gred.c table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets; table 379 net/sched/sch_gred.c table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes; table 380 net/sched/sch_gred.c table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog; table 404 net/sched/sch_gred.c struct 
gred_sched *table = qdisc_priv(sch); table 427 net/sched/sch_gred.c if (sopt->flags && gred_per_vq_red_flags_used(table)) { table 433 net/sched/sch_gred.c table->DPs = sopt->DPs; table 434 net/sched/sch_gred.c table->def = sopt->def_DP; table 435 net/sched/sch_gred.c red_flags_changed = table->red_flags != sopt->flags; table 436 net/sched/sch_gred.c table->red_flags = sopt->flags; table 446 net/sched/sch_gred.c gred_enable_rio_mode(table); table 447 net/sched/sch_gred.c gred_disable_wred_mode(table); table 449 net/sched/sch_gred.c gred_enable_wred_mode(table); table 451 net/sched/sch_gred.c gred_disable_rio_mode(table); table 452 net/sched/sch_gred.c gred_disable_wred_mode(table); table 456 net/sched/sch_gred.c for (i = 0; i < table->DPs; i++) table 457 net/sched/sch_gred.c if (table->tab[i]) table 458 net/sched/sch_gred.c table->tab[i]->red_flags = table 459 net/sched/sch_gred.c table->red_flags & GRED_VQ_RED_FLAGS; table 461 net/sched/sch_gred.c for (i = table->DPs; i < MAX_DPs; i++) { table 462 net/sched/sch_gred.c if (table->tab[i]) { table 465 net/sched/sch_gred.c gred_destroy_vq(table->tab[i]); table 466 net/sched/sch_gred.c table->tab[i] = NULL; table 480 net/sched/sch_gred.c struct gred_sched *table = qdisc_priv(sch); table 481 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[dp]; table 489 net/sched/sch_gred.c table->tab[dp] = q = *prealloc; table 493 net/sched/sch_gred.c q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS; table 531 net/sched/sch_gred.c static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry) table 542 net/sched/sch_gred.c table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]); table 545 net/sched/sch_gred.c static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs) table 553 net/sched/sch_gred.c gred_vq_apply(table, attr); table 559 net/sched/sch_gred.c static int gred_vq_validate(struct gred_sched *table, u32 cdp, table 577 net/sched/sch_gred.c if (dp >= table->DPs) { table 581 net/sched/sch_gred.c if (dp != cdp && !table->tab[dp]) { table 589 net/sched/sch_gred.c if (table->red_flags && table->red_flags != red_flags) { table 603 net/sched/sch_gred.c static int gred_vqs_validate(struct gred_sched *table, u32 cdp, table 617 net/sched/sch_gred.c err = gred_vq_validate(table, cdp, attr, extack); table 638 net/sched/sch_gred.c struct gred_sched *table = qdisc_priv(sch); table 672 net/sched/sch_gred.c if (ctl->DP >= table->DPs) { table 678 net/sched/sch_gred.c err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST], table 684 net/sched/sch_gred.c if (gred_rio_mode(table)) { table 688 net/sched/sch_gred.c if (table->tab[table->def]) table 689 net/sched/sch_gred.c def_prio = table->tab[table->def]->prio; table 708 net/sched/sch_gred.c gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]); table 710 net/sched/sch_gred.c if (gred_rio_mode(table)) { table 711 net/sched/sch_gred.c gred_disable_wred_mode(table); table 713 net/sched/sch_gred.c gred_enable_wred_mode(table); table 759 net/sched/sch_gred.c struct gred_sched *table = qdisc_priv(sch); table 764 net/sched/sch_gred.c .DPs = table->DPs, table 765 net/sched/sch_gred.c .def_DP = table->def, table 766 net/sched/sch_gred.c .grio = gred_rio_mode(table), table 767 net/sched/sch_gred.c .flags = table->red_flags, table 780 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i]; table 796 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i]; table 813 net/sched/sch_gred.c opt.backlog = gred_backlog(table, q, sch); table 827 net/sched/sch_gred.c if 
(gred_wred_mode(table)) table 828 net/sched/sch_gred.c gred_load_wred_set(table, q); table 847 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i]; table 870 net/sched/sch_gred.c gred_backlog(table, q, sch))) table 902 net/sched/sch_gred.c struct gred_sched *table = qdisc_priv(sch); table 905 net/sched/sch_gred.c for (i = 0; i < table->DPs; i++) { table 906 net/sched/sch_gred.c if (table->tab[i]) table 907 net/sched/sch_gred.c gred_destroy_vq(table->tab[i]); table 69 net/sched/sch_netem.c s16 table[0]; table 335 net/sched/sch_netem.c t = dist->table[rnd % dist->size]; table 794 net/sched/sch_netem.c d->table[i] = data[i]; table 1135 net/sctp/sm_sideeffect.c static printfn_t *table[] = { table 1138 net/sctp/sm_sideeffect.c printfn_t *debug_fn __attribute__ ((unused)) = table[event_type]; table 472 net/sctp/sysctl.c struct ctl_table *table; table 475 net/sctp/sysctl.c table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); table 476 net/sctp/sysctl.c if (!table) table 479 net/sctp/sysctl.c for (i = 0; table[i].data; i++) table 480 net/sctp/sysctl.c table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; table 482 net/sctp/sysctl.c net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); table 484 net/sctp/sysctl.c kfree(table); table 492 net/sctp/sysctl.c struct ctl_table *table; table 494 net/sctp/sysctl.c table = net->sctp.sysctl_header->ctl_table_arg; table 496 net/sctp/sysctl.c kfree(table); table 62 net/sunrpc/sysctl.c static int proc_do_xprt(struct ctl_table *table, int write, table 77 net/sunrpc/sysctl.c proc_dodebug(struct ctl_table *table, int write, table 116 net/sunrpc/sysctl.c *(unsigned int *) table->data = value; table 118 net/sunrpc/sysctl.c if (strcmp(table->procname, "rpc_debug") == 0) table 121 net/sunrpc/sysctl.c len = sprintf(tmpbuf, "0x%04x", *(unsigned int *) table->data); table 82 net/sunrpc/xprtrdma/svc_rdma.c static int read_reset_stat(struct ctl_table *table, int write, table 86 net/sunrpc/xprtrdma/svc_rdma.c atomic_t *stat = (atomic_t *)table->data; table 43 net/sysctl_net.c struct ctl_table *table) table 49 net/sysctl_net.c int mode = (table->mode >> 6) & 7; table 53 net/sysctl_net.c return table->mode; table 57 net/sysctl_net.c struct ctl_table *table, table 119 net/sysctl_net.c const char *path, struct ctl_table *table) table 121 net/sysctl_net.c return __register_sysctl_table(&net->sysctls, path, table); table 27 net/unix/sysctl_net_unix.c struct ctl_table *table; table 29 net/unix/sysctl_net_unix.c table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL); table 30 net/unix/sysctl_net_unix.c if (table == NULL) table 35 net/unix/sysctl_net_unix.c table[0].procname = NULL; table 37 net/unix/sysctl_net_unix.c table[0].data = &net->unx.sysctl_max_dgram_qlen; table 38 net/unix/sysctl_net_unix.c net->unx.ctl = register_net_sysctl(net, "net/unix", table); table 45 net/unix/sysctl_net_unix.c kfree(table); table 52 net/unix/sysctl_net_unix.c struct ctl_table *table; table 54 net/unix/sysctl_net_unix.c table = net->unx.ctl->ctl_table_arg; table 56 net/unix/sysctl_net_unix.c kfree(table); table 54 net/vmw_vsock/diag.c unsigned int table; table 62 net/vmw_vsock/diag.c table = cb->args[0]; table 71 net/vmw_vsock/diag.c if (table == 0) { table 97 net/vmw_vsock/diag.c table++; table 134 net/vmw_vsock/diag.c cb->args[0] = table; table 494 net/xfrm/xfrm_policy.c return rcu_dereference_check(net->xfrm.policy_bydst[dir].table, table 511 net/xfrm/xfrm_policy.c return rcu_dereference_check(net->xfrm.policy_bydst[dir].table, table 587 
net/xfrm/xfrm_policy.c odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table, table 593 net/xfrm/xfrm_policy.c rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst); table 1290 net/xfrm/xfrm_policy.c odst = net->xfrm.policy_bydst[dir].table; table 4034 net/xfrm/xfrm_policy.c htab->table = xfrm_hash_alloc(sz); table 4035 net/xfrm/xfrm_policy.c if (!htab->table) table 4061 net/xfrm/xfrm_policy.c xfrm_hash_free(htab->table, sz); table 4089 net/xfrm/xfrm_policy.c WARN_ON(!hlist_empty(htab->table)); table 4090 net/xfrm/xfrm_policy.c xfrm_hash_free(htab->table, sz); table 34 net/xfrm/xfrm_state.c #define xfrm_state_deref_prot(table, net) \ table 35 net/xfrm/xfrm_state.c rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock)) table 46 net/xfrm/xfrm_sysctl.c struct ctl_table *table; table 50 net/xfrm/xfrm_sysctl.c table = kmemdup(xfrm_table, sizeof(xfrm_table), GFP_KERNEL); table 51 net/xfrm/xfrm_sysctl.c if (!table) table 53 net/xfrm/xfrm_sysctl.c table[0].data = &net->xfrm.sysctl_aevent_etime; table 54 net/xfrm/xfrm_sysctl.c table[1].data = &net->xfrm.sysctl_aevent_rseqth; table 55 net/xfrm/xfrm_sysctl.c table[2].data = &net->xfrm.sysctl_larval_drop; table 56 net/xfrm/xfrm_sysctl.c table[3].data = &net->xfrm.sysctl_acq_expires; table 60 net/xfrm/xfrm_sysctl.c table[0].procname = NULL; table 62 net/xfrm/xfrm_sysctl.c net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table); table 68 net/xfrm/xfrm_sysctl.c kfree(table); table 75 net/xfrm/xfrm_sysctl.c struct ctl_table *table; table 77 net/xfrm/xfrm_sysctl.c table = net->xfrm.sysctl_hdr->ctl_table_arg; table 79 net/xfrm/xfrm_sysctl.c kfree(table); table 59 scripts/kallsyms.c static struct sym_entry *table; table 278 scripts/kallsyms.c table = realloc(table, sizeof(*table) * table_size); table 279 scripts/kallsyms.c if (!table) { table 284 scripts/kallsyms.c if (read_symbol(in, &table[table_cnt]) == 0) { table 285 scripts/kallsyms.c table[table_cnt].start_pos = table_cnt; table 372 scripts/kallsyms.c offset = table[i].addr - relative_base; table 374 scripts/kallsyms.c } else if (symbol_absolute(&table[i])) { table 375 scripts/kallsyms.c offset = table[i].addr; table 378 scripts/kallsyms.c offset = relative_base - table[i].addr - 1; table 384 scripts/kallsyms.c symbol_absolute(&table[i]) ? 
"absolute" : "relative", table 385 scripts/kallsyms.c table[i].addr); table 389 scripts/kallsyms.c } else if (!symbol_absolute(&table[i])) { table 390 scripts/kallsyms.c if (_text <= table[i].addr) table 392 scripts/kallsyms.c table[i].addr - _text); table 395 scripts/kallsyms.c _text - table[i].addr); table 397 scripts/kallsyms.c printf("\tPTR\t%#llx\n", table[i].addr); table 427 scripts/kallsyms.c printf("\t.byte 0x%02x", table[i].len); table 428 scripts/kallsyms.c for (k = 0; k < table[i].len; k++) table 429 scripts/kallsyms.c printf(", 0x%02x", table[i].sym[k]); table 432 scripts/kallsyms.c off += table[i].len + 1; table 487 scripts/kallsyms.c if ( symbol_valid(&table[i]) ) { table 489 scripts/kallsyms.c table[pos] = table[i]; table 490 scripts/kallsyms.c learn_symbol(table[pos].sym, table[pos].len); table 493 scripts/kallsyms.c free(table[i].sym); table 519 scripts/kallsyms.c len = table[i].len; table 520 scripts/kallsyms.c p1 = table[i].sym; table 527 scripts/kallsyms.c forget_symbol(table[i].sym, len); table 546 scripts/kallsyms.c table[i].len = len; table 549 scripts/kallsyms.c learn_symbol(table[i].sym, len); table 605 scripts/kallsyms.c for (j = 0; j < table[i].len; j++) { table 606 scripts/kallsyms.c c = table[i].sym[j]; table 712 scripts/kallsyms.c qsort(table, table_cnt, sizeof(struct sym_entry), compare_symbols); table 720 scripts/kallsyms.c if (symbol_in_range(&table[i], &percpu_range, 1)) { table 726 scripts/kallsyms.c table[i].sym[0] = 'A'; table 727 scripts/kallsyms.c table[i].percpu_absolute = 1; table 738 scripts/kallsyms.c if (!symbol_absolute(&table[i]) && table 739 scripts/kallsyms.c table[i].addr < relative_base) table 740 scripts/kallsyms.c relative_base = table[i].addr; table 39 security/apparmor/domain.c if (!domain->table) table 43 security/apparmor/domain.c kzfree(domain->table[i]); table 44 security/apparmor/domain.c kzfree(domain->table); table 45 security/apparmor/domain.c domain->table = NULL; table 517 security/apparmor/domain.c for (*name = profile->file.trans.table[index]; !label && *name; table 568 security/apparmor/domain.c stack = profile->file.trans.table[xindex & AA_X_INDEX_MASK]; table 21 security/apparmor/include/domain.h char **table; table 1573 security/apparmor/lsm.c static int apparmor_dointvec(struct ctl_table *table, int write, table 1581 security/apparmor/lsm.c return proc_dointvec(table, write, buffer, lenp, ppos); table 79 security/apparmor/match.c struct table_header *table = NULL; table 104 security/apparmor/match.c table = kvzalloc(tsize, GFP_KERNEL); table 105 security/apparmor/match.c if (table) { table 106 security/apparmor/match.c table->td_id = th.td_id; table 107 security/apparmor/match.c table->td_flags = th.td_flags; table 108 security/apparmor/match.c table->td_lolen = th.td_lolen; table 110 security/apparmor/match.c UNPACK_ARRAY(table->td_data, blob, th.td_lolen, table 113 security/apparmor/match.c UNPACK_ARRAY(table->td_data, blob, th.td_lolen, table 116 security/apparmor/match.c UNPACK_ARRAY(table->td_data, blob, th.td_lolen, table 123 security/apparmor/match.c if (is_vmalloc_addr(table)) table 128 security/apparmor/match.c return table; table 130 security/apparmor/match.c kvfree(table); table 286 security/apparmor/match.c struct table_header *table = NULL; table 314 security/apparmor/match.c table = unpack_table(data, size); table 315 security/apparmor/match.c if (!table) table 318 security/apparmor/match.c switch (table->td_id) { table 320 security/apparmor/match.c if (!(table->td_flags & ACCEPT1_FLAGS(flags))) table 324 
security/apparmor/match.c if (!(table->td_flags & ACCEPT2_FLAGS(flags))) table 328 security/apparmor/match.c if (table->td_flags != YYTD_DATA32) table 334 security/apparmor/match.c if (table->td_flags != YYTD_DATA16) table 338 security/apparmor/match.c if (table->td_flags != YYTD_DATA8) table 345 security/apparmor/match.c if (dfa->tables[table->td_id]) table 347 security/apparmor/match.c dfa->tables[table->td_id] = table; table 348 security/apparmor/match.c data += table_size(table->td_lolen, table->td_flags); table 349 security/apparmor/match.c size -= table_size(table->td_lolen, table->td_flags); table 350 security/apparmor/match.c table = NULL; table 365 security/apparmor/match.c kvfree(table); table 486 security/apparmor/policy_unpack.c profile->file.trans.table = kcalloc(size, sizeof(char *), table 488 security/apparmor/policy_unpack.c if (!profile->file.trans.table) table 500 security/apparmor/policy_unpack.c profile->file.trans.table[i] = str; table 32 security/min_addr.c int mmap_min_addr_handler(struct ctl_table *table, int write, table 40 security/min_addr.c ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); table 168 security/selinux/ss/mls.c levdatum = hashtab_search(p->p_levels.table, table 296 security/selinux/ss/mls.c levdatum = hashtab_search(pol->p_levels.table, sensitivity); table 315 security/selinux/ss/mls.c catdatum = hashtab_search(pol->p_cats.table, cur_cat); table 328 security/selinux/ss/mls.c rngdatum = hashtab_search(pol->p_cats.table, rngptr); table 461 security/selinux/ss/mls.c levdatum = hashtab_search(newp->p_levels.table, table 473 security/selinux/ss/mls.c catdatum = hashtab_search(newp->p_cats.table, table 199 security/selinux/ss/policydb.c hashtab_map(comdatum->permissions.table, perm_destroy, NULL); table 200 security/selinux/ss/policydb.c hashtab_destroy(comdatum->permissions.table); table 228 security/selinux/ss/policydb.c hashtab_map(cladatum->permissions.table, perm_destroy, NULL); table 229 security/selinux/ss/policydb.c hashtab_destroy(cladatum->permissions.table); table 391 security/selinux/ss/policydb.c rc = hashtab_insert(p->p_roles.table, key, role); table 514 security/selinux/ss/policydb.c hashtab_map(p->symtab[i].table, destroy_f[i], NULL); table 515 security/selinux/ss/policydb.c hashtab_destroy(p->symtab[i].table); table 679 security/selinux/ss/policydb.c hash_eval(s[i].table, symtab_name[i]); table 750 security/selinux/ss/policydb.c rc = hashtab_map(p->symtab[i].table, index_f[i], p); table 772 security/selinux/ss/policydb.c hashtab_map(p->symtab[i].table, destroy_f[i], NULL); table 773 security/selinux/ss/policydb.c hashtab_destroy(p->symtab[i].table); table 1145 security/selinux/ss/policydb.c rc = perm_read(p, comdatum->permissions.table, fp); table 1318 security/selinux/ss/policydb.c cladatum->comdatum = hashtab_search(p->p_commons.table, cladatum->comkey); table 1326 security/selinux/ss/policydb.c rc = perm_read(p, cladatum->permissions.table, fp); table 1749 security/selinux/ss/policydb.c rc = hashtab_map(p->p_users.table, table 1754 security/selinux/ss/policydb.c rc = hashtab_map(p->p_roles.table, table 1759 security/selinux/ss/policydb.c rc = hashtab_map(p->p_types.table, table 1771 security/selinux/ss/policydb.c cladatum = hashtab_search(p->p_classes.table, name); table 1790 security/selinux/ss/policydb.c perdatum = hashtab_search(comdatum->permissions.table, table 1793 security/selinux/ss/policydb.c perdatum = hashtab_search(cladatum->permissions.table, table 2384 security/selinux/ss/policydb.c rc = read_f[i](p, 
p->symtab[i].table, fp); table 2772 security/selinux/ss/policydb.c buf[3] = cpu_to_le32(comdatum->permissions.table->nel); table 2781 security/selinux/ss/policydb.c rc = hashtab_map(comdatum->permissions.table, perm_write, fp); table 2880 security/selinux/ss/policydb.c if (cladatum->permissions.table) table 2881 security/selinux/ss/policydb.c buf[4] = cpu_to_le32(cladatum->permissions.table->nel); table 2899 security/selinux/ss/policydb.c rc = hashtab_map(cladatum->permissions.table, perm_write, fp); table 3462 security/selinux/ss/policydb.c buf[1] = cpu_to_le32(p->symtab[i].table->nel); table 3467 security/selinux/ss/policydb.c rc = hashtab_map(p->symtab[i].table, write_f[i], &pd); table 480 security/selinux/ss/services.c hashtab_map(common_dat->permissions.table, table 484 security/selinux/ss/services.c if (hashtab_map(tclass_dat->permissions.table, table 1403 security/selinux/ss/services.c usrdatum = hashtab_search(pol->p_users.table, scontextp); table 1419 security/selinux/ss/services.c role = hashtab_search(pol->p_roles.table, scontextp); table 1431 security/selinux/ss/services.c typdatum = hashtab_search(pol->p_types.table, scontextp); table 1978 security/selinux/ss/services.c usrdatum = hashtab_search(args->newp->p_users.table, table 1987 security/selinux/ss/services.c role = hashtab_search(args->newp->p_roles.table, table 1995 security/selinux/ss/services.c typdatum = hashtab_search(args->newp->p_types.table, table 2583 security/selinux/ss/services.c user = hashtab_search(policydb->p_users.table, username); table 2939 security/selinux/ss/services.c booldatum = hashtab_search(policydb->p_bools.table, bnames[i]); table 3158 security/selinux/ss/services.c rc = hashtab_map(policydb->p_classes.table, get_classes_callback, table 3195 security/selinux/ss/services.c match = hashtab_search(policydb->p_classes.table, class); table 3209 security/selinux/ss/services.c rc = hashtab_map(match->comdatum->permissions.table, table 3215 security/selinux/ss/services.c rc = hashtab_map(match->permissions.table, get_permissions_callback, table 3334 security/selinux/ss/services.c userdatum = hashtab_search(policydb->p_users.table, rulestr); table 3342 security/selinux/ss/services.c roledatum = hashtab_search(policydb->p_roles.table, rulestr); table 3350 security/selinux/ss/services.c typedatum = hashtab_search(policydb->p_types.table, rulestr); table 38 security/selinux/ss/symtab.c s->table = hashtab_create(symhash, symcmp, size); table 39 security/selinux/ss/symtab.c if (!s->table) table 16 security/selinux/ss/symtab.h struct hashtab *table; /* hash table (keyed on a string) */ table 432 security/yama/yama_lsm.c static int yama_dointvec_minmax(struct ctl_table *table, int write, table 441 security/yama/yama_lsm.c table_copy = *table; table 1258 sound/core/oss/mixer_oss.c static struct snd_mixer_oss_assign_table table[] = { table 1300 sound/core/oss/mixer_oss.c for (idx = 0; idx < ARRAY_SIZE(table); idx++) table 1301 sound/core/oss/mixer_oss.c snd_mixer_oss_build_input(mixer, &table[idx], 0, 0); table 37 sound/core/sgbuf.c if (!(sgbuf->table[i].addr & ~PAGE_MASK)) table 39 sound/core/sgbuf.c tmpb.area = sgbuf->table[i].buf; table 40 sound/core/sgbuf.c tmpb.addr = sgbuf->table[i].addr & PAGE_MASK; table 41 sound/core/sgbuf.c tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT; table 45 sound/core/sgbuf.c kfree(sgbuf->table); table 62 sound/core/sgbuf.c struct snd_sg_page *table; table 81 sound/core/sgbuf.c table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL); table 82 sound/core/sgbuf.c 
if (!table) table 84 sound/core/sgbuf.c sgbuf->table = table; table 109 sound/core/sgbuf.c table->buf = tmpb.area; table 110 sound/core/sgbuf.c table->addr = tmpb.addr; table 112 sound/core/sgbuf.c table->addr |= chunk; /* mark head */ table 113 sound/core/sgbuf.c table++; table 149 sound/core/sgbuf.c pg = sg->table[start].addr >> PAGE_SHIFT; table 155 sound/core/sgbuf.c if ((sg->table[start].addr >> PAGE_SHIFT) != pg) table 159 sound/oss/dmasound/dmasound_atari.c char *table = dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8 table 172 sound/oss/dmasound/dmasound_atari.c *p++ = table[data]; table 389 sound/oss/dmasound/dmasound_atari.c char *table = dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8 table 408 sound/oss/dmasound/dmasound_atari.c data = table[c]; table 427 sound/oss/dmasound/dmasound_atari.c data = table[c] << 8; table 430 sound/oss/dmasound/dmasound_atari.c data |= table[c]; table 66 sound/oss/dmasound/dmasound_q40.c char *table = dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8: dmasound_alaw2dma8; table 74 sound/oss/dmasound/dmasound_q40.c *p = table[*p]+128; table 122 sound/oss/dmasound/dmasound_q40.c unsigned char *table = (unsigned char *) table 139 sound/oss/dmasound/dmasound_q40.c data = table[c]; table 232 sound/oss/dmasound/dmasound_q40.c unsigned char *table = (unsigned char *) table 250 sound/oss/dmasound/dmasound_q40.c data = 0x80 + table[c]; table 1772 sound/pci/ac97/ac97_codec.c static const struct ac97_codec_id *look_for_codec_id(const struct ac97_codec_id *table, table 1777 sound/pci/ac97/ac97_codec.c for (pid = table; pid->id; pid++) table 317 sound/pci/via82xx.c struct snd_dma_buffer table; table 418 sound/pci/via82xx.c if (dev->table.area == NULL) { table 424 sound/pci/via82xx.c &dev->table) < 0) table 454 sound/pci/via82xx.c ((u32 *)dev->table.area)[idx << 1] = cpu_to_le32(addr); table 469 sound/pci/via82xx.c ((u32 *)dev->table.area)[(idx<<1) + 1] = cpu_to_le32(r | flag); table 487 sound/pci/via82xx.c if (dev->table.area) { table 488 sound/pci/via82xx.c snd_dma_free_pages(&dev->table); table 489 sound/pci/via82xx.c dev->table.area = NULL; table 842 sound/pci/via82xx.c if (ptr <= (unsigned int)viadev->table.addr) table 845 sound/pci/via82xx.c idx = ((ptr - (unsigned int)viadev->table.addr) / 8 - 1) % viadev->tbl_entries; table 957 sound/pci/via82xx.c outl((u32)viadev->table.addr, VIADEV_REG(viadev, OFFSET_TABLE_PTR)); table 212 sound/pci/via82xx_modem.c struct snd_dma_buffer table; table 271 sound/pci/via82xx_modem.c if (dev->table.area == NULL) { table 277 sound/pci/via82xx_modem.c &dev->table) < 0) table 307 sound/pci/via82xx_modem.c ((u32 *)dev->table.area)[idx << 1] = cpu_to_le32(addr); table 324 sound/pci/via82xx_modem.c ((u32 *)dev->table.area)[(idx<<1) + 1] = cpu_to_le32(r | flag); table 341 sound/pci/via82xx_modem.c if (dev->table.area) { table 342 sound/pci/via82xx_modem.c snd_dma_free_pages(&dev->table); table 343 sound/pci/via82xx_modem.c dev->table.area = NULL; table 623 sound/pci/via82xx_modem.c if (ptr <= (unsigned int)viadev->table.addr) table 626 sound/pci/via82xx_modem.c idx = ((ptr - (unsigned int)viadev->table.addr) / 8 - 1) % table 681 sound/pci/via82xx_modem.c outl((u32)viadev->table.addr, VIADEV_REG(viadev, OFFSET_TABLE_PTR)); table 481 sound/ppc/tumbler.c unsigned int *table; table 497 sound/ppc/tumbler.c vol = info->table[vol]; table 561 sound/ppc/tumbler.c .table = mixer_volume_table, table 569 sound/ppc/tumbler.c .table = bass_volume_table, table 577 sound/ppc/tumbler.c .table = treble_volume_table, table 
586 sound/ppc/tumbler.c .table = snapper_bass_volume_table, table 594 sound/ppc/tumbler.c .table = snapper_treble_volume_table, table 37 sound/synth/emux/emux_nrpn.c static int send_converted_effect(const struct nrpn_conv_table *table, table 45 sound/synth/emux/emux_nrpn.c if (table[i].control == type) { table 46 sound/synth/emux/emux_nrpn.c cval = table[i].convert(val); table 47 sound/synth/emux/emux_nrpn.c snd_emux_send_effect(port, chan, table[i].effect, table 28 sound/synth/emux/emux_synth.c struct snd_sf_zone **table); table 51 sound/synth/emux/emux_synth.c struct snd_sf_zone *table[SNDRV_EMUX_MAX_MULTI_VOICES]; table 64 sound/synth/emux/emux_synth.c nvoices = get_zone(emu, port, ¬e, vel, chan, table); table 70 sound/synth/emux/emux_synth.c struct snd_sf_zone *zp = table[i]; table 86 sound/synth/emux/emux_synth.c if (table[i] == NULL) table 101 sound/synth/emux/emux_synth.c vp->zone = table[i]; table 891 sound/synth/emux/emux_synth.c struct snd_sf_zone **table) table 908 sound/synth/emux/emux_synth.c table, SNDRV_EMUX_MAX_MULTI_VOICES); table 56 sound/synth/emux/soundfont.c int preset, int bank, struct snd_sf_zone **table, table 894 sound/synth/emux/soundfont.c calc_parm_search(int msec, short *table) table 899 sound/synth/emux/soundfont.c if (msec < (int)table[mid]) table 1224 sound/synth/emux/soundfont.c struct snd_sf_zone **table, int max_layers) table 1239 sound/synth/emux/soundfont.c table, max_layers, 0); table 1244 sound/synth/emux/soundfont.c table, max_layers, 0); table 1275 sound/synth/emux/soundfont.c int preset, int bank, struct snd_sf_zone **table, table 1297 sound/synth/emux/soundfont.c preset, bank, table, table 1303 sound/synth/emux/soundfont.c table[nvoices++] = zp; table 29 tools/arch/x86/lib/inat.c const insn_attr_t *table; table 34 tools/arch/x86/lib/inat.c table = inat_escape_tables[n][0]; table 35 tools/arch/x86/lib/inat.c if (!table) table 37 tools/arch/x86/lib/inat.c if (inat_has_variant(table[opcode]) && lpfx_id) { table 38 tools/arch/x86/lib/inat.c table = inat_escape_tables[n][lpfx_id]; table 39 tools/arch/x86/lib/inat.c if (!table) table 42 tools/arch/x86/lib/inat.c return table[opcode]; table 48 tools/arch/x86/lib/inat.c const insn_attr_t *table; table 53 tools/arch/x86/lib/inat.c table = inat_group_tables[n][0]; table 54 tools/arch/x86/lib/inat.c if (!table) table 56 tools/arch/x86/lib/inat.c if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) { table 57 tools/arch/x86/lib/inat.c table = inat_group_tables[n][lpfx_id]; table 58 tools/arch/x86/lib/inat.c if (!table) table 61 tools/arch/x86/lib/inat.c return table[X86_MODRM_REG(modrm)] | table 68 tools/arch/x86/lib/inat.c const insn_attr_t *table; table 72 tools/arch/x86/lib/inat.c table = inat_avx_tables[vex_m][0]; table 73 tools/arch/x86/lib/inat.c if (!table) table 75 tools/arch/x86/lib/inat.c if (!inat_is_group(table[opcode]) && vex_p) { table 77 tools/arch/x86/lib/inat.c table = inat_avx_tables[vex_m][vex_p]; table 78 tools/arch/x86/lib/inat.c if (!table) table 81 tools/arch/x86/lib/inat.c return table[opcode]; table 40 tools/bpf/bpftool/btf.c DECLARE_HASHTABLE(table, 16); table 570 tools/bpf/bpftool/btf.c hash_for_each_safe(tab->table, bkt, tmp, obj, hash) { table 667 tools/bpf/bpftool/btf.c hash_add(tab->table, &obj_node->hash, obj_node->btf_id); table 714 tools/bpf/bpftool/btf.c hash_for_each_possible(btf_prog_table->table, obj, hash, info->id) { table 721 tools/bpf/bpftool/btf.c hash_for_each_possible(btf_map_table->table, obj, hash, info->id) { table 743 tools/bpf/bpftool/btf.c 
hash_for_each_possible(btf_prog_table->table, obj, hash, table 752 tools/bpf/bpftool/btf.c hash_for_each_possible(btf_map_table->table, obj, hash, table 802 tools/bpf/bpftool/btf.c hash_init(btf_prog_table.table); table 803 tools/bpf/bpftool/btf.c hash_init(btf_map_table.table); table 413 tools/bpf/bpftool/common.c hash_add(tab->table, &obj_node->hash, obj_node->id); table 429 tools/bpf/bpftool/common.c hash_for_each_safe(tab->table, bkt, tmp, obj, hash) { table 370 tools/bpf/bpftool/main.c hash_init(prog_table.table); table 371 tools/bpf/bpftool/main.c hash_init(map_table.table); table 114 tools/bpf/bpftool/main.h DECLARE_HASHTABLE(table, 16); table 126 tools/bpf/bpftool/main.h int build_pinned_obj_table(struct pinned_obj_table *table, table 547 tools/bpf/bpftool/map.c if (!hash_empty(map_table.table)) { table 552 tools/bpf/bpftool/map.c hash_for_each_possible(map_table.table, obj, hash, info->id) { table 615 tools/bpf/bpftool/map.c if (!hash_empty(map_table.table)) { table 618 tools/bpf/bpftool/map.c hash_for_each_possible(map_table.table, obj, hash, info->id) { table 256 tools/bpf/bpftool/prog.c if (!hash_empty(prog_table.table)) { table 261 tools/bpf/bpftool/prog.c hash_for_each_possible(prog_table.table, obj, hash, info->id) { table 317 tools/bpf/bpftool/prog.c if (!hash_empty(prog_table.table)) { table 320 tools/bpf/bpftool/prog.c hash_for_each_possible(prog_table.table, obj, hash, info->id) { table 1406 tools/lib/traceevent/event-parse.c } table[] = { table 1420 tools/lib/traceevent/event-parse.c for (i = 0; table[i].type; i++) { table 1421 tools/lib/traceevent/event-parse.c if (!strcmp(table[i].type, name)) table 1422 tools/lib/traceevent/event-parse.c return table[i].size; table 900 tools/objtool/check.c struct rela *table) table 902 tools/objtool/check.c struct rela *rela = table; table 912 tools/objtool/check.c list_for_each_entry_from(rela, &table->sec->rela_list, list) { table 915 tools/objtool/check.c if (rela != table && rela->jump_table_start) table 93 tools/perf/builtin-trace.c struct syscall *table; table 1065 tools/perf/builtin-trace.c struct file *table; table 1126 tools/perf/builtin-trace.c struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file)); table 1138 tools/perf/builtin-trace.c ttrace->files.table = nfiles; table 1142 tools/perf/builtin-trace.c return ttrace->files.table + fd; table 1204 tools/perf/builtin-trace.c if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) { table 1212 tools/perf/builtin-trace.c return ttrace->files.table[fd].pathname; table 1252 tools/perf/builtin-trace.c zfree(&ttrace->files.table[fd].pathname); table 1506 tools/perf/builtin-trace.c if (trace->syscalls.table == NULL) { table 1507 tools/perf/builtin-trace.c trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); table 1508 tools/perf/builtin-trace.c if (trace->syscalls.table == NULL) table 1512 tools/perf/builtin-trace.c sc = trace->syscalls.table + id; table 1824 tools/perf/builtin-trace.c if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && table 1828 tools/perf/builtin-trace.c if (trace->syscalls.table[id].name == NULL) { table 1829 tools/perf/builtin-trace.c if (trace->syscalls.table[id].nonexistent) table 1834 tools/perf/builtin-trace.c return &trace->syscalls.table[id]; table 1840 tools/perf/builtin-trace.c if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) table 1841 tools/perf/builtin-trace.c fprintf(trace->output, "(%s)", 
trace->syscalls.table[id].name); table 3710 tools/perf/builtin-trace.c sc = &trace->syscalls.table[syscall_stats_entry->syscall]; table 246 tools/perf/pmu-events/jevents.c static const char *field_to_perf(struct map *table, char *map, jsmntok_t *val) table 250 tools/perf/pmu-events/jevents.c for (i = 0; table[i].json; i++) { table 251 tools/perf/pmu-events/jevents.c if (json_streq(map, val, table[i].json)) table 252 tools/perf/pmu-events/jevents.c return table[i].perf; table 35 tools/perf/pmu-events/pmu-events.h struct pmu_event *table; table 324 tools/perf/util/metricgroup.c pe = &map->table[i]; table 414 tools/perf/util/metricgroup.c pe = &map->table[i]; table 553 tools/perf/util/metricgroup.c pe = &map->table[i]; table 694 tools/perf/util/pmu.c if (!map->table) { table 767 tools/perf/util/pmu.c struct pmu_event *pe = &map->table[i++]; table 143 tools/perf/util/s390-sample-raw.c struct pmu_event *evp = map->table; table 38 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c char *signature, struct acpi_table_header **table); table 42 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c char *signature, struct acpi_table_header **table); table 44 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c static void osl_unmap_table(struct acpi_table_header *table); table 59 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c struct acpi_table_header **table, table 67 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c struct acpi_table_header **table, table 156 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c struct acpi_table_header **table) table 195 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c *table = local_table; table 221 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c struct acpi_table_header **table, table 240 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c osl_get_bios_table(signature, instance, table, address); table 245 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c instance, table, address); table 255 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c signature, instance, table, table 349 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c struct acpi_table_header **table, table 379 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c table, address); table 768 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c struct acpi_table_header **table, table 975 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c *table = local_table; table 1055 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c char *signature, struct acpi_table_header **table) table 1115 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c *table = mapped_table; table 1131 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c static void osl_unmap_table(struct acpi_table_header *table) table 1133 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c if (table) { table 1134 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c acpi_os_unmap_memory(table, ap_get_table_length(table)); table 1200 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c char *signature, struct acpi_table_header **table) table 1281 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c *table = local_table; table 1309 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c struct acpi_table_header **table, table 1369 tools/power/acpi/os_specific/service_layers/oslinuxtbl.c status = osl_read_table_from_file(table_filename, 0, NULL, table); table 71 tools/power/acpi/tools/acpidump/acpidump.h u8 ap_is_valid_header(struct 
acpi_table_header *table); table 73 tools/power/acpi/tools/acpidump/acpidump.h u8 ap_is_valid_checksum(struct acpi_table_header *table); table 75 tools/power/acpi/tools/acpidump/acpidump.h u32 ap_get_table_length(struct acpi_table_header *table); table 82 tools/power/acpi/tools/acpidump/acpidump.h int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance); table 15 tools/power/acpi/tools/acpidump/apdump.c ap_dump_table_buffer(struct acpi_table_header *table, table 30 tools/power/acpi/tools/acpidump/apdump.c u8 ap_is_valid_header(struct acpi_table_header *table) table 33 tools/power/acpi/tools/acpidump/apdump.c if (!ACPI_VALIDATE_RSDP_SIG(table->signature)) { table 37 tools/power/acpi/tools/acpidump/apdump.c if (!acpi_ut_valid_nameseg(table->signature)) { table 40 tools/power/acpi/tools/acpidump/apdump.c *(u32 *)table->signature); table 46 tools/power/acpi/tools/acpidump/apdump.c if (table->length < sizeof(struct acpi_table_header)) { table 48 tools/power/acpi/tools/acpidump/apdump.c table->length); table 68 tools/power/acpi/tools/acpidump/apdump.c u8 ap_is_valid_checksum(struct acpi_table_header *table) table 73 tools/power/acpi/tools/acpidump/apdump.c if (ACPI_VALIDATE_RSDP_SIG(table->signature)) { table 78 tools/power/acpi/tools/acpidump/apdump.c rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table); table 81 tools/power/acpi/tools/acpidump/apdump.c status = acpi_tb_verify_checksum(table, table->length); table 86 tools/power/acpi/tools/acpidump/apdump.c table->signature); table 104 tools/power/acpi/tools/acpidump/apdump.c u32 ap_get_table_length(struct acpi_table_header *table) table 110 tools/power/acpi/tools/acpidump/apdump.c if (!ap_is_valid_header(table)) { table 114 tools/power/acpi/tools/acpidump/apdump.c if (ACPI_VALIDATE_RSDP_SIG(table->signature)) { table 115 tools/power/acpi/tools/acpidump/apdump.c rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table); table 121 tools/power/acpi/tools/acpidump/apdump.c return (table->length); table 140 tools/power/acpi/tools/acpidump/apdump.c ap_dump_table_buffer(struct acpi_table_header *table, table 145 tools/power/acpi/tools/acpidump/apdump.c table_length = ap_get_table_length(table); table 150 tools/power/acpi/tools/acpidump/apdump.c acpi_tb_print_table_header(address, table); table 157 tools/power/acpi/tools/acpidump/apdump.c return (ap_write_to_binary_file(table, instance)); table 166 tools/power/acpi/tools/acpidump/apdump.c table->signature, ACPI_FORMAT_UINT64(address)); table 169 tools/power/acpi/tools/acpidump/apdump.c ACPI_CAST_PTR(u8, table), table_length, table 190 tools/power/acpi/tools/acpidump/apdump.c struct acpi_table_header *table; table 201 tools/power/acpi/tools/acpidump/apdump.c acpi_os_get_table_by_index(i, &table, &instance, &address); table 221 tools/power/acpi/tools/acpidump/apdump.c table_status = ap_dump_table_buffer(table, instance, address); table 222 tools/power/acpi/tools/acpidump/apdump.c ACPI_FREE(table); table 249 tools/power/acpi/tools/acpidump/apdump.c struct acpi_table_header *table; table 264 tools/power/acpi/tools/acpidump/apdump.c status = acpi_os_get_table_by_address(address, &table); table 272 tools/power/acpi/tools/acpidump/apdump.c table_status = ap_dump_table_buffer(table, 0, address); table 273 tools/power/acpi/tools/acpidump/apdump.c ACPI_FREE(table); table 294 tools/power/acpi/tools/acpidump/apdump.c struct acpi_table_header *table; table 323 tools/power/acpi/tools/acpidump/apdump.c &table, &address); table 338 tools/power/acpi/tools/acpidump/apdump.c table_status = 
ap_dump_table_buffer(table, instance, address); table 339 tools/power/acpi/tools/acpidump/apdump.c ACPI_FREE(table); table 365 tools/power/acpi/tools/acpidump/apdump.c struct acpi_table_header *table; table 371 tools/power/acpi/tools/acpidump/apdump.c table = ap_get_table_from_file(pathname, &file_size); table 372 tools/power/acpi/tools/acpidump/apdump.c if (!table) { table 376 tools/power/acpi/tools/acpidump/apdump.c if (!acpi_ut_valid_nameseg(table->signature)) { table 384 tools/power/acpi/tools/acpidump/apdump.c if (table->length > file_size) { table 387 tools/power/acpi/tools/acpidump/apdump.c table->length, file_size, pathname); table 394 tools/power/acpi/tools/acpidump/apdump.c pathname, table->signature, file_size, file_size); table 397 tools/power/acpi/tools/acpidump/apdump.c table_status = ap_dump_table_buffer(table, 0, 0); table 400 tools/power/acpi/tools/acpidump/apdump.c ACPI_FREE(table); table 104 tools/power/acpi/tools/acpidump/apfiles.c int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance) table 114 tools/power/acpi/tools/acpidump/apfiles.c table_length = ap_get_table_length(table); table 118 tools/power/acpi/tools/acpidump/apfiles.c if (ACPI_VALIDATE_RSDP_SIG(table->signature)) { table 121 tools/power/acpi/tools/acpidump/apfiles.c ACPI_COPY_NAMESEG(filename, table->signature); table 142 tools/power/acpi/tools/acpidump/apfiles.c table->signature, filename, table->length, table 143 tools/power/acpi/tools/acpidump/apfiles.c table->length); table 154 tools/power/acpi/tools/acpidump/apfiles.c actual = fwrite(table, 1, table_length, file); table 402 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c int table = 117; table 418 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c (void *)&table, sizeof(table)); table 722 tools/testing/selftests/x86/sigreturn.c const char *table; table 724 tools/testing/selftests/x86/sigreturn.c table = "GDT"; table 726 tools/testing/selftests/x86/sigreturn.c table = "LDT"; table 728 tools/testing/selftests/x86/sigreturn.c table = "IDT"; table 730 tools/testing/selftests/x86/sigreturn.c table = "???"; table 733 tools/testing/selftests/x86/sigreturn.c table, src, sig_err >> 3);
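Note: several of the net/* sysctl entries listed above (net/sctp/sysctl.c, net/unix/sysctl_net_unix.c, net/xfrm/xfrm_sysctl.c) share the same per-network-namespace pattern: duplicate a static ctl_table template with kmemdup(), repoint each .data field at the namespace's own storage, register the copy with register_net_sysctl(), and later recover the copy through the header's ctl_table_arg so it can be freed on unregister. The sketch below is illustrative only, assuming a kernel build context; the names my_table, "net/my_subsys", and the net->mydata fields are hypothetical placeholders, not taken from any of the files cited above.

/* Minimal sketch of the per-netns sysctl duplication pattern seen in the
 * cited files. net->mydata.sysctl_foo and net->mydata.sysctl_hdr are
 * hypothetical per-namespace fields used only for illustration.
 */
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static struct ctl_table my_table[] = {
	{
		.procname	= "foo",
		.data		= &init_net.mydata.sysctl_foo, /* repointed per netns below */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __net_init my_sysctl_register(struct net *net)
{
	struct ctl_table *table;

	/* Each namespace gets its own writable copy of the template. */
	table = kmemdup(my_table, sizeof(my_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Point .data away from init_net's storage to this namespace's storage. */
	table[0].data = &net->mydata.sysctl_foo;

	net->mydata.sysctl_hdr = register_net_sysctl(net, "net/my_subsys", table);
	if (!net->mydata.sysctl_hdr) {
		kfree(table);
		return -ENOMEM;
	}
	return 0;
}

static void __net_exit my_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	/* The registered header keeps a pointer back to the duplicated table. */
	table = net->mydata.sysctl_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->mydata.sysctl_hdr);
	kfree(table);
}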