h 167 arch/alpha/include/asm/core_lca.h struct el_common h; /* common logout header */ h 176 arch/alpha/include/asm/core_lca.h struct el_common h; /* common logout header */ h 82 arch/alpha/include/asm/core_mcpcia.h #define MCPCIA_HOSE2MID(h) ((h) + 4) h 298 arch/alpha/include/asm/core_titan.h #define TITAN_HOSE(h) (((unsigned long)(h)) << TITAN_HOSE_SHIFT) h 300 arch/alpha/include/asm/core_titan.h #define TITAN_MEM(h) (TITAN_BASE+TITAN_HOSE(h)+0x000000000UL) h 301 arch/alpha/include/asm/core_titan.h #define _TITAN_IACK_SC(h) (TITAN_BASE+TITAN_HOSE(h)+0x1F8000000UL) h 302 arch/alpha/include/asm/core_titan.h #define TITAN_IO(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FC000000UL) h 303 arch/alpha/include/asm/core_titan.h #define TITAN_CONF(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FE000000UL) h 256 arch/alpha/include/asm/core_tsunami.h #define TSUNAMI_HOSE(h) (((unsigned long)(h)) << 33) h 259 arch/alpha/include/asm/core_tsunami.h #define TSUNAMI_MEM(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x000000000UL) h 260 arch/alpha/include/asm/core_tsunami.h #define _TSUNAMI_IACK_SC(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1F8000000UL) h 261 arch/alpha/include/asm/core_tsunami.h #define TSUNAMI_IO(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FC000000UL) h 262 arch/alpha/include/asm/core_tsunami.h #define TSUNAMI_CONF(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FE000000UL) h 228 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_HOSE(h) ((long)(h) << 33) h 231 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_QBB_HOSE(q,h) (WILDFIRE_QBB_IO(q) | WILDFIRE_HOSE(h)) h 233 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_MEM(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x000000000UL) h 234 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_CONF(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FE000000UL) h 235 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_IO(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FF000000UL) h 261 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_pci(q,h) \ h 262 arch/alpha/include/asm/core_wildfire.h ((wildfire_pci *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(((h)&6)>>1)|((((h)&1)|2)<<16)|(((1UL<<13)-1)<<23))) h 211 arch/alpha/include/asm/hwrpb.h hwrpb_update_checksum(struct hwrpb_struct *h) h 214 arch/alpha/include/asm/hwrpb.h for (l = (unsigned long *) h; l < (unsigned long *) &h->chksum; ++l) h 216 arch/alpha/include/asm/hwrpb.h h->chksum = sum; h 78 arch/alpha/kernel/console.c int h = (pu64[30] >> 24) & 0xff; /* console hose # */ h 85 arch/alpha/kernel/console.c if (hose->index == h) break; h 89 arch/alpha/kernel/console.c printk("Console graphics on hose %d\n", h); h 116 arch/alpha/kernel/core_marvel.c int h; h 130 arch/alpha/kernel/core_marvel.c for (h = 0; h < 4; h++) { h 131 arch/alpha/kernel/core_marvel.c io7->ports[h].io7 = io7; h 132 arch/alpha/kernel/core_marvel.c io7->ports[h].port = h; h 133 arch/alpha/kernel/core_marvel.c io7->ports[h].enabled = 0; /* default to disabled */ h 384 arch/alpha/kernel/core_marvel.c int h = (pu64[30] >> 24) & 0xff; /* TERM_OUT_LOC, hose # */ h 393 arch/alpha/kernel/core_marvel.c printk("console graphics is on hose %d (console)\n", h); h 403 arch/alpha/kernel/core_marvel.c pid = h >> 2; h 404 arch/alpha/kernel/core_marvel.c port = h & 3; h 1085 arch/alpha/kernel/core_marvel.c struct pci_controller *h; h 1091 arch/alpha/kernel/core_marvel.c h = io7->ports[IO7_AGP_PORT].hose; h 1092 arch/alpha/kernel/core_marvel.c addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0); h 1095 arch/alpha/kernel/core_marvel.c hose = h; h 254 arch/alpha/kernel/core_mcpcia.c mcpcia_probe_hose(int h) h 257 
arch/alpha/kernel/core_mcpcia.c int mid = MCPCIA_HOSE2MID(h); h 289 arch/alpha/kernel/core_mcpcia.c mcpcia_new_hose(int h) h 293 arch/alpha/kernel/core_mcpcia.c int mid = MCPCIA_HOSE2MID(h); h 296 arch/alpha/kernel/core_mcpcia.c if (h == 0) h 309 arch/alpha/kernel/core_mcpcia.c hose->index = h; h 313 arch/alpha/kernel/core_mcpcia.c io->name = pci_io_names[h]; h 318 arch/alpha/kernel/core_mcpcia.c mem->name = pci_mem_names[h]; h 327 arch/alpha/kernel/core_mcpcia.c printk(KERN_ERR "Failed to request IO on hose %d\n", h); h 329 arch/alpha/kernel/core_mcpcia.c printk(KERN_ERR "Failed to request MEM on hose %d\n", h); h 331 arch/alpha/kernel/core_mcpcia.c printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h); h 425 arch/alpha/kernel/core_mcpcia.c int h; h 429 arch/alpha/kernel/core_mcpcia.c for (h = 0; h < MCPCIA_MAX_HOSES; ++h) { h 430 arch/alpha/kernel/core_mcpcia.c if (mcpcia_probe_hose(h)) { h 431 arch/alpha/kernel/core_mcpcia.c if (h != 0) h 432 arch/alpha/kernel/core_mcpcia.c mcpcia_new_hose(h); h 458 arch/alpha/kernel/core_titan.c int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT; h 472 arch/alpha/kernel/core_titan.c h = pci_vga_hose->index; h 481 arch/alpha/kernel/core_titan.c if (hose->index == h) h 160 arch/alpha/kernel/err_common.c struct el_subpacket_handler *h = subpacket_handler_list; h 162 arch/alpha/kernel/err_common.c for (; h && h->class != header->class; h = h->next); h 163 arch/alpha/kernel/err_common.c if (h) next = h->handler(header); h 304 arch/alpha/kernel/err_common.c struct el_subpacket_handler *h = subpacket_handler_list; h 306 arch/alpha/kernel/err_common.c if (h == NULL) subpacket_handler_list = new; h 308 arch/alpha/kernel/err_common.c for (; h->next != NULL; h = h->next) { h 309 arch/alpha/kernel/err_common.c if (h->class == new->class || h == new) { h 315 arch/alpha/kernel/err_common.c h->next = new; h 32 arch/alpha/kernel/err_impl.h #define SUBPACKET_HANDLER_INIT(c, h) {NULL, (c), (h)} h 55 arch/alpha/kernel/sys_rawhide.c #define hose_exists(h) \ h 56 arch/alpha/kernel/sys_rawhide.c (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) h 174 arch/alpha/kernel/sys_rawhide.c unsigned int h = hose->index; h 175 arch/alpha/kernel/sys_rawhide.c unsigned int mask = hose_irq_masks[h]; h 177 arch/alpha/kernel/sys_rawhide.c cached_irq_masks[h] = mask; h 178 arch/alpha/kernel/sys_rawhide.c *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(h)) = mask; h 179 arch/alpha/kernel/sys_rawhide.c *(vuip)MCPCIA_INT_MASK1(MCPCIA_HOSE2MID(h)) = 0; h 309 arch/arc/include/asm/atomic.h #include <asm-generic/atomic64.h> h 22 arch/arc/include/asm/processor.h unsigned int l, h; h 30 arch/arm/crypto/ghash-ce-glue.c u64 h[2]; h 149 arch/arm/crypto/ghash-ce-glue.c static void ghash_reflect(u64 h[], const be128 *k) h 153 arch/arm/crypto/ghash-ce-glue.c h[0] = (be64_to_cpu(k->b) << 1) | carry; h 154 arch/arm/crypto/ghash-ce-glue.c h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63); h 157 arch/arm/crypto/ghash-ce-glue.c h[1] ^= 0xc200000000000000UL; h 164 arch/arm/crypto/ghash-ce-glue.c be128 h; h 173 arch/arm/crypto/ghash-ce-glue.c ghash_reflect(key->h, &key->k); h 175 arch/arm/crypto/ghash-ce-glue.c h = key->k; h 176 arch/arm/crypto/ghash-ce-glue.c gf128mul_lle(&h, &key->k); h 177 arch/arm/crypto/ghash-ce-glue.c ghash_reflect(key->h2, &h); h 179 arch/arm/crypto/ghash-ce-glue.c gf128mul_lle(&h, &key->k); h 180 arch/arm/crypto/ghash-ce-glue.c ghash_reflect(key->h3, &h); h 182 arch/arm/crypto/ghash-ce-glue.c gf128mul_lle(&h, &key->k); h 183 arch/arm/crypto/ghash-ce-glue.c 
ghash_reflect(key->h4, &h); h 16 arch/arm/include/asm/arch_timer.h #define has_erratum_handler(h) (false) h 17 arch/arm/include/asm/arch_timer.h #define erratum_handler(h) (arch_timer_##h) h 2838 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c struct omap_hwmod_ocp_if **h = NULL, **h_gp = NULL, **h_sham = NULL; h 2860 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c h = omap34xx_hwmod_ocp_ifs; h 2865 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c h = am35xx_hwmod_ocp_ifs; h 2871 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c h = omap36xx_hwmod_ocp_ifs; h 2880 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c r = omap_hwmod_register_links(h); h 2915 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c h = NULL; h 2917 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c h = omap3430es1_hwmod_ocp_ifs; h 2921 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c h = omap3430es2plus_hwmod_ocp_ifs; h 2924 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c if (h) { h 2925 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c r = omap_hwmod_register_links(h); h 2930 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c h = NULL; h 2933 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c h = omap3430_pre_es3_hwmod_ocp_ifs; h 2936 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c h = omap3430_es3plus_hwmod_ocp_ifs; h 2939 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c if (h) h 2940 arch/arm/mach-omap2/omap_hwmod_3xxx_data.c r = omap_hwmod_register_links(h); h 121 arch/arm/mach-pxa/irq.c static int pxa_irq_map(struct irq_domain *h, unsigned int virq, h 345 arch/arm/probes/decode.c const struct decode_header *h) h 359 arch/arm/probes/decode.c retval = checker_func(insn, asi, h); h 416 arch/arm/probes/decode.c const struct decode_header *h = (struct decode_header *)table; h 445 arch/arm/probes/decode.c for (;; h = next) { h 446 arch/arm/probes/decode.c enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK; h 447 arch/arm/probes/decode.c u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS; h 453 arch/arm/probes/decode.c ((uintptr_t)h + decode_struct_sizes[type]); h 455 arch/arm/probes/decode.c if (!matched && (insn & h->mask.bits) != h->value.bits) h 464 arch/arm/probes/decode.c struct decode_table *d = (struct decode_table *)h; h 471 arch/arm/probes/decode.c struct decode_custom *d = (struct decode_custom *)h; h 474 arch/arm/probes/decode.c err = run_checkers(checkers, action, origin_insn, asi, h); h 477 arch/arm/probes/decode.c return actions[action].decoder(insn, asi, h); h 482 arch/arm/probes/decode.c struct decode_simulate *d = (struct decode_simulate *)h; h 485 arch/arm/probes/decode.c err = run_checkers(checkers, action, origin_insn, asi, h); h 494 arch/arm/probes/decode.c struct decode_emulate *d = (struct decode_emulate *)h; h 497 arch/arm/probes/decode.c err = run_checkers(checkers, action, origin_insn, asi, h); h 502 arch/arm/probes/decode.c return actions[action].decoder(insn, asi, h); h 126 arch/arm/probes/kprobes/actions-common.c const struct decode_header *h) h 15 arch/arm/probes/kprobes/checkers-arm.c const struct decode_header *h) h 95 arch/arm/probes/kprobes/checkers-arm.c const struct decode_header *h) h 103 arch/arm/probes/kprobes/checkers-arm.c const struct decode_header *h) h 105 arch/arm/probes/kprobes/checkers-arm.c u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS; h 119 arch/arm/probes/kprobes/checkers-arm.c const struct decode_header *h) h 129 arch/arm/probes/kprobes/checkers-arm.c const struct decode_header *h) h 150 arch/arm/probes/kprobes/checkers-arm.c const struct decode_header *h) h 153 arch/arm/probes/kprobes/checkers-arm.c arm_check_regs_normal(insn, asi, h); h 15 
arch/arm/probes/kprobes/checkers-common.c const struct decode_header *h) h 23 arch/arm/probes/kprobes/checkers-common.c const struct decode_header *h) h 32 arch/arm/probes/kprobes/checkers-common.c const struct decode_header *h) h 45 arch/arm/probes/kprobes/checkers-common.c const struct decode_header *h) h 54 arch/arm/probes/kprobes/checkers-common.c const struct decode_header *h) h 64 arch/arm/probes/kprobes/checkers-common.c const struct decode_header *h) h 73 arch/arm/probes/kprobes/checkers-common.c const struct decode_header *h) h 15 arch/arm/probes/kprobes/checkers-thumb.c const struct decode_header *h) h 89 arch/arm/probes/kprobes/checkers-thumb.c const struct decode_header *h) h 29 arch/arm/probes/kprobes/core.h const struct decode_header *h); h 600 arch/arm/probes/kprobes/test-core.c const struct decode_header *h = (struct decode_header *)table; h 604 arch/arm/probes/kprobes/test-core.c enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK; h 609 arch/arm/probes/kprobes/test-core.c result = fn(h, args); h 613 arch/arm/probes/kprobes/test-core.c h = (struct decode_header *) h 614 arch/arm/probes/kprobes/test-core.c ((uintptr_t)h + decode_struct_sizes[type]); h 619 arch/arm/probes/kprobes/test-core.c static int table_test_fail(const struct decode_header *h, const char* message) h 623 arch/arm/probes/kprobes/test-core.c message, h->mask.bits, h->value.bits); h 633 arch/arm/probes/kprobes/test-core.c static int table_test_fn(const struct decode_header *h, void *args) h 636 arch/arm/probes/kprobes/test-core.c enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK; h 638 arch/arm/probes/kprobes/test-core.c if (h->value.bits & ~h->mask.bits) h 639 arch/arm/probes/kprobes/test-core.c return table_test_fail(h, "Match value has bits not in mask"); h 641 arch/arm/probes/kprobes/test-core.c if ((h->mask.bits & a->parent_mask) != a->parent_mask) h 642 arch/arm/probes/kprobes/test-core.c return table_test_fail(h, "Mask has bits not in parent mask"); h 644 arch/arm/probes/kprobes/test-core.c if ((h->value.bits ^ a->parent_value) & a->parent_mask) h 645 arch/arm/probes/kprobes/test-core.c return table_test_fail(h, "Value is inconsistent with parent"); h 648 arch/arm/probes/kprobes/test-core.c struct decode_table *d = (struct decode_table *)h; h 650 arch/arm/probes/kprobes/test-core.c args2.parent_mask = h->mask.bits; h 651 arch/arm/probes/kprobes/test-core.c args2.parent_value = h->value.bits; h 723 arch/arm/probes/kprobes/test-core.c unsigned coverage_start_registers(const struct decode_header *h) h 728 arch/arm/probes/kprobes/test-core.c int r = (h->type_regs.bits >> (DECODE_TYPE_BITS + i)) & 0xf; h 734 arch/arm/probes/kprobes/test-core.c static int coverage_start_fn(const struct decode_header *h, void *args) h 737 arch/arm/probes/kprobes/test-core.c enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK; h 747 arch/arm/probes/kprobes/test-core.c entry->header = h; h 748 arch/arm/probes/kprobes/test-core.c entry->regs = coverage_start_registers(h); h 753 arch/arm/probes/kprobes/test-core.c struct decode_table *d = (struct decode_table *)h; h 850 arch/arm/probes/kprobes/test-core.c const struct decode_header *h = entry->header; h 851 arch/arm/probes/kprobes/test-core.c enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK; h 860 arch/arm/probes/kprobes/test-core.c if ((insn & h->mask.bits) != h->value.bits) h 144 arch/arm64/crypto/aes-ce-ccm-glue.c struct __packed { __be16 l; __be32 h; u16 len; } ltag; h 155 arch/arm64/crypto/aes-ce-ccm-glue.c put_unaligned_be32(len, 
&ltag.h); h 34 arch/arm64/crypto/ghash-ce-glue.c u64 h[2]; h 214 arch/arm64/crypto/ghash-ce-glue.c static void ghash_reflect(u64 h[], const be128 *k) h 218 arch/arm64/crypto/ghash-ce-glue.c h[0] = (be64_to_cpu(k->b) << 1) | carry; h 219 arch/arm64/crypto/ghash-ce-glue.c h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63); h 222 arch/arm64/crypto/ghash-ce-glue.c h[1] ^= 0xc200000000000000UL; h 228 arch/arm64/crypto/ghash-ce-glue.c be128 h; h 233 arch/arm64/crypto/ghash-ce-glue.c ghash_reflect(key->h, &key->k); h 235 arch/arm64/crypto/ghash-ce-glue.c h = key->k; h 236 arch/arm64/crypto/ghash-ce-glue.c gf128mul_lle(&h, &key->k); h 237 arch/arm64/crypto/ghash-ce-glue.c ghash_reflect(key->h2, &h); h 239 arch/arm64/crypto/ghash-ce-glue.c gf128mul_lle(&h, &key->k); h 240 arch/arm64/crypto/ghash-ce-glue.c ghash_reflect(key->h3, &h); h 242 arch/arm64/crypto/ghash-ce-glue.c gf128mul_lle(&h, &key->k); h 243 arch/arm64/crypto/ghash-ce-glue.c ghash_reflect(key->h4, &h); h 24 arch/arm64/include/asm/arch_timer.h #define has_erratum_handler(h) \ h 28 arch/arm64/include/asm/arch_timer.h (__wa && __wa->h); \ h 31 arch/arm64/include/asm/arch_timer.h #define erratum_handler(h) \ h 35 arch/arm64/include/asm/arch_timer.h (__wa && __wa->h) ? __wa->h : arch_timer_##h; \ h 39 arch/arm64/include/asm/arch_timer.h #define has_erratum_handler(h) false h 40 arch/arm64/include/asm/arch_timer.h #define erratum_handler(h) (arch_timer_##h) h 300 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, h, , 16, , , , , K) h 304 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", K) h 308 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K) h 312 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K) h 365 arch/arm64/include/asm/atomic_lse.h __CMPXCHG_CASE(w, h, , 16, ) h 369 arch/arm64/include/asm/atomic_lse.h __CMPXCHG_CASE(w, h, acq_, 16, a, "memory") h 373 arch/arm64/include/asm/atomic_lse.h __CMPXCHG_CASE(w, h, rel_, 16, l, "memory") h 377 arch/arm64/include/asm/atomic_lse.h __CMPXCHG_CASE(w, h, mb_, 16, al, "memory") h 46 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE(w, h, , 16, , , , , , ) h 50 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE(w, h, acq_, 16, , , a, a, , "memory") h 54 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE(w, h, rel_, 16, , , , , l, "memory") h 58 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE(w, h, mb_, 16, dmb ish, nop, , a, l, "memory") h 251 arch/arm64/include/asm/cmpxchg.h __CMPWAIT_CASE(w, h, 16); h 17 arch/arm64/include/asm/hugetlb.h extern bool arch_hugetlb_migration_supported(struct hstate *h); h 98 arch/arm64/include/asm/percpu.h __PERCPU_OP_CASE(w, h, name, 16, op_llsc, op_lse) \ h 104 arch/arm64/include/asm/percpu.h __PERCPU_RET_OP_CASE(w, h, name, 16, op_llsc, op_lse) \ h 760 arch/arm64/include/asm/sysreg.h #include <linux/build_bug.h> h 761 arch/arm64/include/asm/sysreg.h #include <linux/types.h> h 25 arch/arm64/kernel/kexec_image.c const struct arm64_image_header *h = h 28 arch/arm64/kernel/kexec_image.c if (!h || (kernel_len < sizeof(*h))) h 31 arch/arm64/kernel/kexec_image.c if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic))) h 42 arch/arm64/kernel/kexec_image.c struct arm64_image_header *h; h 59 arch/arm64/kernel/kexec_image.c h = (struct arm64_image_header *)kernel; h 60 arch/arm64/kernel/kexec_image.c if (!h->image_size) h 64 arch/arm64/kernel/kexec_image.c flags = le64_to_cpu(h->flags); h 88 arch/arm64/kernel/kexec_image.c kbuf.memsz = 
le64_to_cpu(h->image_size); h 89 arch/arm64/kernel/kexec_image.c text_offset = le64_to_cpu(h->text_offset); h 23 arch/arm64/mm/hugetlbpage.c bool arch_hugetlb_migration_supported(struct hstate *h) h 25 arch/arm64/mm/hugetlbpage.c size_t pagesize = huge_page_size(h); h 20 arch/c6x/include/asm/timex.h unsigned l, h; h 26 arch/c6x/include/asm/timex.h : "=b"(l), "=b"(h)); h 27 arch/c6x/include/asm/timex.h return ((cycles_t)h << 32) | l; h 73 arch/c6x/kernel/irq.c static int core_domain_map(struct irq_domain *h, unsigned int virq, h 117 arch/c6x/platforms/megamod-pic.c static int megamod_map(struct irq_domain *h, unsigned int virq, h 120 arch/c6x/platforms/megamod-pic.c struct megamod_pic *pic = h->host_data; h 210 arch/csky/include/asm/atomic.h #include <asm-generic/atomic.h> h 151 arch/h8300/include/asm/bitops.h #include <asm-generic/bitops/ffs.h> h 28 arch/ia64/include/asm/perfmon.h extern int pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h); h 29 arch/ia64/include/asm/perfmon.h extern int pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h); h 125 arch/ia64/kernel/perfmon.c #define PFM_CTX_TASK(h) (h)->ctx_task h 2 arch/m68k/fpsp040/fpsp.h | fpsp.h 3.3 3.3 h 11 arch/m68k/fpsp040/fpsp.h | fpsp.h --- stack frame offsets during FPSP exception handling h 42 arch/m68k/sun3x/time.c volatile struct mostek_dt *h = h 49 arch/m68k/sun3x/time.c h->csr |= C_WRITE; h 50 arch/m68k/sun3x/time.c h->sec = bin2bcd(t->tm_sec); h 51 arch/m68k/sun3x/time.c h->min = bin2bcd(t->tm_min); h 52 arch/m68k/sun3x/time.c h->hour = bin2bcd(t->tm_hour); h 53 arch/m68k/sun3x/time.c h->wday = bin2bcd(t->tm_wday); h 54 arch/m68k/sun3x/time.c h->mday = bin2bcd(t->tm_mday); h 55 arch/m68k/sun3x/time.c h->month = bin2bcd(t->tm_mon + 1); h 56 arch/m68k/sun3x/time.c h->year = bin2bcd(t->tm_year % 100); h 57 arch/m68k/sun3x/time.c h->csr &= ~C_WRITE; h 59 arch/m68k/sun3x/time.c h->csr |= C_READ; h 60 arch/m68k/sun3x/time.c t->tm_sec = bcd2bin(h->sec); h 61 arch/m68k/sun3x/time.c t->tm_min = bcd2bin(h->min); h 62 arch/m68k/sun3x/time.c t->tm_hour = bcd2bin(h->hour); h 63 arch/m68k/sun3x/time.c t->tm_wday = bcd2bin(h->wday); h 64 arch/m68k/sun3x/time.c t->tm_mday = bcd2bin(h->mday); h 65 arch/m68k/sun3x/time.c t->tm_mon = bcd2bin(h->month) - 1; h 66 arch/m68k/sun3x/time.c t->tm_year = bcd2bin(h->year); h 67 arch/m68k/sun3x/time.c h->csr &= ~C_READ; h 27 arch/microblaze/include/asm/mmu.h unsigned long h:1; /* Hash algorithm indicator */ h 154 arch/mips/alchemy/common/clock.c struct clk_hw *h; h 156 arch/mips/alchemy/common/clock.c h = kzalloc(sizeof(*h), GFP_KERNEL); h 157 arch/mips/alchemy/common/clock.c if (!h) h 165 arch/mips/alchemy/common/clock.c h->init = &id; h 167 arch/mips/alchemy/common/clock.c return clk_register(NULL, h); h 1363 arch/mips/cavium-octeon/octeon-irq.c void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h) h 1365 arch/mips/cavium-octeon/octeon-irq.c octeon_irq_ip4 = h; h 97 arch/mips/crypto/crc32-mips.c CRC32(crc, value, h); h 137 arch/mips/crypto/crc32-mips.c CRC32C(crc, value, h); h 274 arch/mips/include/asm/asmmacro.h ld.h $w\wd, \off(\base) h 310 arch/mips/include/asm/asmmacro.h st.h $w\wd, \off(\base) h 27 arch/mips/include/asm/hugetlb.h struct hstate *h = hstate_file(file); h 29 arch/mips/include/asm/hugetlb.h if (len & ~huge_page_mask(h)) h 31 arch/mips/include/asm/hugetlb.h if (addr & ~huge_page_mask(h)) h 1980 arch/mips/kernel/traps.c u16 *h; h 2039 arch/mips/kernel/traps.c h = (u16 *)(b + lui_offset); h 2040 arch/mips/kernel/traps.c *h = (handler >> 16) & 0xffff; h 2041 
arch/mips/kernel/traps.c h = (u16 *)(b + ori_offset); h 2042 arch/mips/kernel/traps.c *h = (handler & 0xffff); h 2054 arch/mips/kernel/traps.c h = (u16 *)b; h 2061 arch/mips/kernel/traps.c h[0] = (insn >> 16) & 0xffff; h 2062 arch/mips/kernel/traps.c h[1] = insn & 0xffff; h 2063 arch/mips/kernel/traps.c h[2] = 0; h 2064 arch/mips/kernel/traps.c h[3] = 0; h 393 arch/mips/mm/uasm.c #include <asm/octeon/octeon.h> h 49 arch/nios2/kernel/irq.c static int irq_map(struct irq_domain *h, unsigned int virq, h 76 arch/parisc/include/asm/pci.h #define HBA_PORT_BASE(h) ((h) << HBA_PORT_SPACE_BITS) h 75 arch/parisc/include/asm/psw.h unsigned int h:1; h 60 arch/parisc/kernel/ptrace.c pa_psw(task)->h = 0; h 108 arch/parisc/kernel/ptrace.c pa_psw(task)->h = 0; h 120 arch/parisc/kernel/ptrace.c pa_psw(task)->h = 0; h 29 arch/parisc/mm/hugetlbpage.c struct hstate *h = hstate_file(file); h 31 arch/parisc/mm/hugetlbpage.c if (len & ~huge_page_mask(h)) h 41 arch/parisc/mm/hugetlbpage.c addr = ALIGN(addr, huge_page_size(h)); h 24 arch/powerpc/include/asm/book3s/32/kup.h oris \gpr1, \gpr1, SR_NX@h /* set Nx */ h 60 arch/powerpc/include/asm/book3s/32/kup.h oris \gpr1, \gpr1, SR_KS@h /* set Ks */ h 90 arch/powerpc/include/asm/book3s/32/kup.h #include <linux/sched.h> h 77 arch/powerpc/include/asm/book3s/32/mmu-hash.h unsigned long h:1; /* Hash algorithm indicator */ h 59 arch/powerpc/include/asm/book3s/64/kup-radix.h #include <asm/reg.h> h 276 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l, h 281 arch/powerpc/include/asm/book3s/64/mmu-hash.h if (!(h & HPTE_V_LARGE)) h 294 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) h 296 arch/powerpc/include/asm/book3s/64/mmu-hash.h return __hpte_page_size(h, l, 0); h 299 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l) h 301 arch/powerpc/include/asm/book3s/64/mmu-hash.h return __hpte_page_size(h, l, 1); h 220 arch/powerpc/include/asm/kvm_book3s_64.h static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l) h 224 arch/powerpc/include/asm/kvm_book3s_64.h if (!(h & HPTE_V_LARGE)) h 250 arch/powerpc/include/asm/kvm_book3s_64.h static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l) h 252 arch/powerpc/include/asm/kvm_book3s_64.h return kvmppc_hpte_page_shifts(h, l) & 0xff; h 255 arch/powerpc/include/asm/kvm_book3s_64.h static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l) h 257 arch/powerpc/include/asm/kvm_book3s_64.h int tmp = kvmppc_hpte_page_shifts(h, l); h 12 arch/powerpc/include/asm/nohash/32/kup-8xx.h lis \gpr2, MD_APG_KUAP@h /* only APG0 and APG1 are used */ h 27 arch/powerpc/include/asm/nohash/32/kup-8xx.h 999: twnei \gpr, MD_APG_KUAP@h h 34 arch/powerpc/include/asm/nohash/32/kup-8xx.h #include <asm/reg.h> h 317 arch/powerpc/include/asm/ppc_asm.h #define __AS_ATHIGH h h 441 arch/powerpc/include/asm/ppc_asm.h lis r4,KERNELBASE@h; \ h 462 arch/powerpc/include/asm/ppc_asm.h lis scratch,0x60000000@h; \ h 504 arch/powerpc/include/asm/ppc_asm.h #define tophys(rd, rs) addis rd, rs, -PAGE_OFFSET@h h 505 arch/powerpc/include/asm/ppc_asm.h #define tovirt(rd, rs) addis rd, rs, PAGE_OFFSET@h h 829 arch/powerpc/include/asm/ppc_asm.h lis reg,BUCSR_INIT@h; \ h 117 arch/powerpc/include/asm/sstep.h u16 h[8]; h 113 arch/powerpc/kernel/align.c u16 h[4]; h 152 arch/powerpc/kernel/align.c data.h[2] = 
*evr >> 16; h 153 arch/powerpc/kernel/align.c data.h[3] = regs->gpr[reg] >> 16; h 156 arch/powerpc/kernel/align.c data.h[2] = *evr & 0xffff; h 157 arch/powerpc/kernel/align.c data.h[3] = regs->gpr[reg] & 0xffff; h 198 arch/powerpc/kernel/align.c data.h[0] = temp.h[3]; h 199 arch/powerpc/kernel/align.c data.h[2] = temp.h[3]; h 203 arch/powerpc/kernel/align.c data.h[1] = temp.h[3]; h 204 arch/powerpc/kernel/align.c data.h[3] = temp.h[3]; h 207 arch/powerpc/kernel/align.c data.h[0] = temp.h[2]; h 208 arch/powerpc/kernel/align.c data.h[2] = temp.h[3]; h 212 arch/powerpc/kernel/align.c data.h[1] = temp.h[2]; h 213 arch/powerpc/kernel/align.c data.h[3] = temp.h[3]; h 220 arch/powerpc/kernel/align.c data.h[0] = temp.h[2]; h 221 arch/powerpc/kernel/align.c data.h[1] = temp.h[2]; h 222 arch/powerpc/kernel/align.c data.h[2] = temp.h[3]; h 223 arch/powerpc/kernel/align.c data.h[3] = temp.h[3]; h 241 arch/powerpc/kernel/align.c data.h[0] = swab16(data.h[0]); h 242 arch/powerpc/kernel/align.c data.h[1] = swab16(data.h[1]); h 243 arch/powerpc/kernel/align.c data.h[2] = swab16(data.h[2]); h 244 arch/powerpc/kernel/align.c data.h[3] = swab16(data.h[3]); h 250 arch/powerpc/kernel/align.c data.w[0] = (s16)data.h[1]; h 251 arch/powerpc/kernel/align.c data.w[1] = (s16)data.h[3]; h 102 arch/powerpc/kernel/head_32.h andis. r12,r12,DBCR0_IDM@h h 122 arch/powerpc/kernel/head_32.h lis r11, transfer_to_syscall@h h 138 arch/powerpc/kernel/head_booke.h andis. r12,r12,DBCR0_IDM@h h 160 arch/powerpc/kernel/head_booke.h lis r11, transfer_to_syscall@h h 168 arch/powerpc/kernel/head_booke.h lis r10, MSR_KERNEL@h h 172 arch/powerpc/kernel/head_booke.h lis r10, (MSR_KERNEL | MSR_EE)@h h 331 arch/powerpc/kernel/head_booke.h lis r10,msr@h; \ h 371 arch/powerpc/kernel/head_booke.h andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \ h 374 arch/powerpc/kernel/head_booke.h lis r10,interrupt_base@h; /* check if exception in vectors */ \ h 379 arch/powerpc/kernel/head_booke.h lis r10,interrupt_end@h; \ h 386 arch/powerpc/kernel/head_booke.h lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \ h 424 arch/powerpc/kernel/head_booke.h andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \ h 427 arch/powerpc/kernel/head_booke.h lis r10,interrupt_base@h; /* check if exception in vectors */ \ h 432 arch/powerpc/kernel/head_booke.h lis r10,interrupt_end@h; \ h 439 arch/powerpc/kernel/head_booke.h lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \ h 500 arch/powerpc/kernel/head_booke.h lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \ h 818 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long j, h; h 827 arch/powerpc/kvm/book3s_64_mmu_hv.c h = rev[i].back; h 828 arch/powerpc/kvm/book3s_64_mmu_hv.c rev[h].forw = j; h 829 arch/powerpc/kvm/book3s_64_mmu_hv.c rev[j].back = h; h 163 arch/powerpc/kvm/book3s_rtas.c struct rtas_handler *h = NULL; h 176 arch/powerpc/kvm/book3s_rtas.c h = &rtas_handlers[i]; h 177 arch/powerpc/kvm/book3s_rtas.c if (rtas_name_matches(h->name, name)) { h 190 arch/powerpc/kvm/book3s_rtas.c d->handler = h; h 645 arch/powerpc/lib/rheap.c struct list_head *h; h 651 arch/powerpc/lib/rheap.c h = &info->free_list; h 655 arch/powerpc/lib/rheap.c h = &info->taken_list; h 664 arch/powerpc/lib/rheap.c list_for_each(l, h) { h 724 arch/powerpc/lib/sstep.c reg->h[i] = !rev ? *hp++ : byterev_2(*hp++); h 800 arch/powerpc/lib/sstep.c *hp++ = !rev ? 
reg->h[i] : byterev_2(reg->h[i]); h 52 arch/powerpc/mm/book3s64/radix_hugetlbpage.c struct hstate *h = hstate_file(file); h 61 arch/powerpc/mm/book3s64/radix_hugetlbpage.c if (len & ~huge_page_mask(h)) h 75 arch/powerpc/mm/book3s64/radix_hugetlbpage.c addr = ALIGN(addr, huge_page_size(h)); h 89 arch/powerpc/mm/book3s64/radix_hugetlbpage.c info.align_mask = PAGE_MASK & ~huge_page_mask(h); h 239 arch/powerpc/mm/hugetlbpage.c int __init alloc_bootmem_huge_page(struct hstate *h) h 244 arch/powerpc/mm/hugetlbpage.c return pseries_alloc_bootmem_huge_page(h); h 246 arch/powerpc/mm/hugetlbpage.c return __alloc_bootmem_huge_page(h); h 243 arch/powerpc/mm/pgtable.c struct hstate *h = hstate_vma(vma); h 245 arch/powerpc/mm/pgtable.c psize = hstate_get_psize(h); h 247 arch/powerpc/mm/pgtable.c assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep)); h 15 arch/powerpc/perf/req-gen/_begin.h #define REQUEST_BEGIN CAT2_STR(REQ_GEN_PREFIX, _request-begin.h) h 16 arch/powerpc/perf/req-gen/_begin.h #define REQUEST_END CAT2_STR(REQ_GEN_PREFIX, _request-end.h) h 173 arch/powerpc/platforms/4xx/uic.c static int uic_host_map(struct irq_domain *h, unsigned int virq, h 176 arch/powerpc/platforms/4xx/uic.c struct uic *uic = h->host_data; h 123 arch/powerpc/platforms/512x/mpc5121_ads_cpld.c cpld_pic_host_match(struct irq_domain *h, struct device_node *node, h 130 arch/powerpc/platforms/512x/mpc5121_ads_cpld.c cpld_pic_host_map(struct irq_domain *h, unsigned int virq, h 110 arch/powerpc/platforms/52xx/media5200.c static int media5200_irq_map(struct irq_domain *h, unsigned int virq, h 113 arch/powerpc/platforms/52xx/media5200.c pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); h 120 arch/powerpc/platforms/52xx/media5200.c static int media5200_irq_xlate(struct irq_domain *h, struct device_node *ct, h 203 arch/powerpc/platforms/52xx/mpc52xx_gpt.c static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq, h 206 arch/powerpc/platforms/52xx/mpc52xx_gpt.c struct mpc52xx_gpt_priv *gpt = h->host_data; h 208 arch/powerpc/platforms/52xx/mpc52xx_gpt.c dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq); h 215 arch/powerpc/platforms/52xx/mpc52xx_gpt.c static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct, h 220 arch/powerpc/platforms/52xx/mpc52xx_gpt.c struct mpc52xx_gpt_priv *gpt = h->host_data; h 304 arch/powerpc/platforms/52xx/mpc52xx_pic.c static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct, h 338 arch/powerpc/platforms/52xx/mpc52xx_pic.c static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, h 102 arch/powerpc/platforms/82xx/pq2ads-pci-pic.c static int pci_pic_host_map(struct irq_domain *h, unsigned int virq, h 106 arch/powerpc/platforms/82xx/pq2ads-pci-pic.c irq_set_chip_data(virq, h->host_data); h 228 arch/powerpc/platforms/85xx/socrates_fpga_pic.c static int socrates_fpga_pic_host_map(struct irq_domain *h, unsigned int virq, h 239 arch/powerpc/platforms/85xx/socrates_fpga_pic.c static int socrates_fpga_pic_host_xlate(struct irq_domain *h, h 102 arch/powerpc/platforms/8xx/cpm1.c static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq, h 88 arch/powerpc/platforms/8xx/pic.c static int mpc8xx_pic_host_map(struct irq_domain *h, unsigned int virq, h 99 arch/powerpc/platforms/8xx/pic.c static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct, h 307 arch/powerpc/platforms/cell/axon_msi.c static int msic_host_map(struct irq_domain *h, unsigned int virq, h 310 
arch/powerpc/platforms/cell/axon_msi.c irq_set_chip_data(virq, h->host_data); h 207 arch/powerpc/platforms/cell/interrupt.c static int iic_host_match(struct irq_domain *h, struct device_node *node, h 214 arch/powerpc/platforms/cell/interrupt.c static int iic_host_map(struct irq_domain *h, unsigned int virq, h 231 arch/powerpc/platforms/cell/interrupt.c static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, h 158 arch/powerpc/platforms/cell/spider-pic.c static int spider_host_map(struct irq_domain *h, unsigned int virq, h 161 arch/powerpc/platforms/cell/spider-pic.c irq_set_chip_data(virq, h->host_data); h 170 arch/powerpc/platforms/cell/spider-pic.c static int spider_host_xlate(struct irq_domain *h, struct device_node *ct, h 97 arch/powerpc/platforms/embedded6xx/flipper-pic.c static int flipper_pic_map(struct irq_domain *h, unsigned int virq, h 100 arch/powerpc/platforms/embedded6xx/flipper-pic.c irq_set_chip_data(virq, h->host_data); h 95 arch/powerpc/platforms/embedded6xx/hlwd-pic.c static int hlwd_pic_map(struct irq_domain *h, unsigned int virq, h 98 arch/powerpc/platforms/embedded6xx/hlwd-pic.c irq_set_chip_data(virq, h->host_data); h 108 arch/powerpc/platforms/embedded6xx/hlwd-pic.c static unsigned int __hlwd_pic_get_irq(struct irq_domain *h) h 110 arch/powerpc/platforms/embedded6xx/hlwd-pic.c void __iomem *io_base = h->host_data; h 120 arch/powerpc/platforms/embedded6xx/hlwd-pic.c return irq_linear_revmap(h, irq); h 137 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_write_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h) h 144 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_gpio, cmd, h, value, mask); h 147 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_read_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h) h 156 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_gpio, cmd, h, mask, rshift, xor); h 159 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_write_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h) h 168 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_reg32, cmd, h, offset, value, mask); h 171 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_read_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h) h 177 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_reg32, cmd, h, offset); h 181 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_write_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h) h 190 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_reg16, cmd, h, offset, value, mask); h 193 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_read_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h) h 199 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_reg16, cmd, h, offset); h 203 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_write_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h) h 212 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_reg8, cmd, h, offset, value, mask); h 215 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_read_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h) h 221 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_reg8, cmd, h, offset); h 224 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_delay(struct pmf_cmd *cmd, struct pmf_handlers *h) h 230 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(delay, cmd, h, duration); h 233 
arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_wait_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h) h 242 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(wait_reg32, cmd, h, offset, value, mask); h 245 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_wait_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h) h 254 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(wait_reg16, cmd, h, offset, value, mask); h 257 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_wait_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h) h 266 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(wait_reg8, cmd, h, offset, value, mask); h 269 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_read_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h) h 275 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_i2c, cmd, h, bytes); h 278 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_write_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h) h 286 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_i2c, cmd, h, bytes, blob); h 290 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_rmw_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h) h 304 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(rmw_i2c, cmd, h, maskbytes, valuesbytes, totalbytes, h 308 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_read_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h) h 315 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_cfg, cmd, h, offset, bytes); h 319 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_write_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h) h 328 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_cfg, cmd, h, offset, bytes, blob); h 331 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_rmw_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h) h 346 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(rmw_cfg, cmd, h, offset, maskbytes, valuesbytes, h 351 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_read_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h) h 359 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_i2c_sub, cmd, h, subaddr, bytes); h 362 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_write_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h) h 372 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_i2c_sub, cmd, h, subaddr, bytes, blob); h 375 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_set_i2c_mode(struct pmf_cmd *cmd, struct pmf_handlers *h) h 381 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(set_i2c_mode, cmd, h, mode); h 385 arch/powerpc/platforms/powermac/pfunc_core.c static int pmf_parser_rmw_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h) h 400 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(rmw_i2c_sub, cmd, h, subaddr, maskbytes, valuesbytes, h 405 arch/powerpc/platforms/powermac/pfunc_core.c struct pmf_handlers *h) h 415 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_reg32_msrx, cmd, h, offset, mask, shift, xor); h 419 arch/powerpc/platforms/powermac/pfunc_core.c struct pmf_handlers *h) h 429 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_reg16_msrx, cmd, h, offset, mask, shift, xor); h 432 arch/powerpc/platforms/powermac/pfunc_core.c struct pmf_handlers *h) h 442 
arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(read_reg8_msrx, cmd, h, offset, mask, shift, xor); h 446 arch/powerpc/platforms/powermac/pfunc_core.c struct pmf_handlers *h) h 455 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_reg32_slm, cmd, h, offset, shift, mask); h 459 arch/powerpc/platforms/powermac/pfunc_core.c struct pmf_handlers *h) h 468 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_reg16_slm, cmd, h, offset, shift, mask); h 472 arch/powerpc/platforms/powermac/pfunc_core.c struct pmf_handlers *h) h 481 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(write_reg8_slm, cmd, h, offset, shift, mask); h 485 arch/powerpc/platforms/powermac/pfunc_core.c struct pmf_handlers *h) h 495 arch/powerpc/platforms/powermac/pfunc_core.c PMF_PARSE_CALL(mask_and_compare, cmd, h, h 500 arch/powerpc/platforms/powermac/pfunc_core.c typedef int (*pmf_cmd_parser_t)(struct pmf_cmd *cmd, struct pmf_handlers *h); h 267 arch/powerpc/platforms/powermac/pic.c static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, h 274 arch/powerpc/platforms/powermac/pic.c static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq, h 175 arch/powerpc/platforms/powermac/smp.c static int psurge_host_map(struct irq_domain *h, unsigned int virq, h 137 arch/powerpc/platforms/powernv/opal-irqchip.c static int opal_event_match(struct irq_domain *h, struct device_node *node, h 140 arch/powerpc/platforms/powernv/opal-irqchip.c return irq_domain_get_of_node(h) == node; h 143 arch/powerpc/platforms/powernv/opal-irqchip.c static int opal_event_xlate(struct irq_domain *h, struct device_node *np, h 358 arch/powerpc/platforms/powernv/smp.c int h = get_hard_smp_processor_id(cpu); h 361 arch/powerpc/platforms/powernv/smp.c opal_quiesce(QUIESCE_HOLD, h); h 363 arch/powerpc/platforms/powernv/smp.c rc = opal_signal_system_reset(h); h 366 arch/powerpc/platforms/powernv/smp.c opal_quiesce(QUIESCE_RESUME, h); h 658 arch/powerpc/platforms/ps3/interrupt.c static int ps3_host_map(struct irq_domain *h, unsigned int virq, h 669 arch/powerpc/platforms/ps3/interrupt.c static int ps3_host_match(struct irq_domain *h, struct device_node *np, h 306 arch/powerpc/platforms/ps3/os-area.c static void _dump_header(const struct os_area_header *h, const char *func, h 309 arch/powerpc/platforms/ps3/os-area.c char str[sizeof(h->magic_num) + 1]; h 311 arch/powerpc/platforms/ps3/os-area.c dump_field(str, h->magic_num, sizeof(h->magic_num)); h 315 arch/powerpc/platforms/ps3/os-area.c h->hdr_version); h 317 arch/powerpc/platforms/ps3/os-area.c h->db_area_offset); h 319 arch/powerpc/platforms/ps3/os-area.c h->ldr_area_offset); h 321 arch/powerpc/platforms/ps3/os-area.c h->ldr_format); h 323 arch/powerpc/platforms/ps3/os-area.c h->ldr_size); h 105 arch/powerpc/platforms/pseries/hvCall_inst.c struct hcall_stats *h; h 110 arch/powerpc/platforms/pseries/hvCall_inst.c h = this_cpu_ptr(&hcall_stats[opcode / 4]); h 111 arch/powerpc/platforms/pseries/hvCall_inst.c h->tb_start = mftb(); h 112 arch/powerpc/platforms/pseries/hvCall_inst.c h->purr_start = mfspr(SPRN_PURR); h 118 arch/powerpc/platforms/pseries/hvCall_inst.c struct hcall_stats *h; h 123 arch/powerpc/platforms/pseries/hvCall_inst.c h = this_cpu_ptr(&hcall_stats[opcode / 4]); h 124 arch/powerpc/platforms/pseries/hvCall_inst.c h->num_calls++; h 125 arch/powerpc/platforms/pseries/hvCall_inst.c h->tb_total += mftb() - h->tb_start; h 126 arch/powerpc/platforms/pseries/hvCall_inst.c h->purr_total += mfspr(SPRN_PURR) - h->purr_start; h 
426 arch/powerpc/platforms/pseries/ras.c struct rtas_error_log *h; h 439 arch/powerpc/platforms/pseries/ras.c h = (struct rtas_error_log *)&savep[1]; h 442 arch/powerpc/platforms/pseries/ras.c if (!rtas_error_extended(h)) { h 443 arch/powerpc/platforms/pseries/ras.c memcpy(local_paca->mce_data_buf, h, sizeof(__u64)); h 447 arch/powerpc/platforms/pseries/ras.c error_log_length = 8 + rtas_error_extended_log_length(h); h 449 arch/powerpc/platforms/pseries/ras.c memcpy(local_paca->mce_data_buf, h, len); h 215 arch/powerpc/sysdev/cpm2_pic.c static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq, h 206 arch/powerpc/sysdev/dcr.c dcr_host_mmio_t h = host; h 208 arch/powerpc/sysdev/dcr.c if (h.token == NULL) h 210 arch/powerpc/sysdev/dcr.c h.token += host.base * h.stride; h 211 arch/powerpc/sysdev/dcr.c iounmap(h.token); h 212 arch/powerpc/sysdev/dcr.c h.token = NULL; h 180 arch/powerpc/sysdev/ehv_pic.c static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node, h 184 arch/powerpc/sysdev/ehv_pic.c struct device_node *of_node = irq_domain_get_of_node(h); h 188 arch/powerpc/sysdev/ehv_pic.c static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq, h 191 arch/powerpc/sysdev/ehv_pic.c struct ehv_pic *ehv_pic = h->host_data; h 217 arch/powerpc/sysdev/ehv_pic.c static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct, h 85 arch/powerpc/sysdev/fsl_msi.c static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq, h 88 arch/powerpc/sysdev/fsl_msi.c struct fsl_msi *msi_data = h->host_data; h 156 arch/powerpc/sysdev/ge/ge_pic.c static int gef_pic_host_map(struct irq_domain *h, unsigned int virq, h 166 arch/powerpc/sysdev/ge/ge_pic.c static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct, h 161 arch/powerpc/sysdev/i8259.c static int i8259_host_match(struct irq_domain *h, struct device_node *node, h 164 arch/powerpc/sysdev/i8259.c struct device_node *of_node = irq_domain_get_of_node(h); h 168 arch/powerpc/sysdev/i8259.c static int i8259_host_map(struct irq_domain *h, unsigned int virq, h 185 arch/powerpc/sysdev/i8259.c static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct, h 671 arch/powerpc/sysdev/ipic.c static int ipic_host_match(struct irq_domain *h, struct device_node *node, h 675 arch/powerpc/sysdev/ipic.c struct device_node *of_node = irq_domain_get_of_node(h); h 679 arch/powerpc/sysdev/ipic.c static int ipic_host_map(struct irq_domain *h, unsigned int virq, h 682 arch/powerpc/sysdev/ipic.c struct ipic *ipic = h->host_data; h 992 arch/powerpc/sysdev/mpic.c static int mpic_host_match(struct irq_domain *h, struct device_node *node, h 996 arch/powerpc/sysdev/mpic.c struct device_node *of_node = irq_domain_get_of_node(h); h 1000 arch/powerpc/sysdev/mpic.c static int mpic_host_map(struct irq_domain *h, unsigned int virq, h 1003 arch/powerpc/sysdev/mpic.c struct mpic *mpic = h->host_data; h 1085 arch/powerpc/sysdev/mpic.c static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct, h 1090 arch/powerpc/sysdev/mpic.c struct mpic *mpic = h->host_data; h 364 arch/powerpc/sysdev/tsi108_pci.c static int pci_irq_host_xlate(struct irq_domain *h, struct device_node *ct, h 373 arch/powerpc/sysdev/tsi108_pci.c static int pci_irq_host_map(struct irq_domain *h, unsigned int virq, h 307 arch/powerpc/sysdev/xics/xics-common.c static int xics_host_match(struct irq_domain *h, struct device_node *node, h 330 arch/powerpc/sysdev/xics/xics-common.c static int xics_host_map(struct irq_domain *h, unsigned int 
virq, h 359 arch/powerpc/sysdev/xics/xics-common.c static int xics_host_xlate(struct irq_domain *h, struct device_node *ct, h 1225 arch/powerpc/sysdev/xive/common.c static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, h 1271 arch/powerpc/sysdev/xive/common.c static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct, h 1293 arch/powerpc/sysdev/xive/common.c static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node, h 1007 arch/s390/crypto/aes_s390.c u8 h[AES_BLOCK_SIZE]; /* Hash-subkey */ h 84 arch/s390/include/asm/cpu_mf.h unsigned int h:1; /* 1: part. level reserved for VM use*/ h 18 arch/s390/include/uapi/asm/runtime_instr.h __u32 h : 1; h 1899 arch/s390/kernel/perf_cpum_sf.c cpuhw->lsctl.h = 1; h 1252 arch/s390/kernel/ptrace.c cb->h == 0 && h 281 arch/s390/mm/hugetlbpage.c struct hstate *h = hstate_file(file); h 288 arch/s390/mm/hugetlbpage.c info.align_mask = PAGE_MASK & ~huge_page_mask(h); h 297 arch/s390/mm/hugetlbpage.c struct hstate *h = hstate_file(file); h 305 arch/s390/mm/hugetlbpage.c info.align_mask = PAGE_MASK & ~huge_page_mask(h); h 329 arch/s390/mm/hugetlbpage.c struct hstate *h = hstate_file(file); h 334 arch/s390/mm/hugetlbpage.c if (len & ~huge_page_mask(h)) h 346 arch/s390/mm/hugetlbpage.c addr = ALIGN(addr, huge_page_size(h)); h 225 arch/sh/include/asm/io.h #include <asm/io_noioport.h> h 352 arch/sh/include/asm/pgtable_32.h #define PTE_BIT_FUNC(h,fn,op) \ h 353 arch/sh/include/asm/pgtable_32.h static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } h 115 arch/sparc/include/asm/uaccess_32.h __put_user_asm(x, h, addr, __pu_ret); \ h 137 arch/sparc/include/asm/uaccess_32.h case 2: __put_user_asm(x, h, addr, __pu_ret); break; \ h 108 arch/sparc/include/asm/uaccess_64.h case 2: __put_user_asm(data, h, addr, __pu_ret); break; \ h 279 arch/sparc/kernel/ldc.c unsigned long h, t; h 281 arch/sparc/kernel/ldc.c h = head_for_data(lp); h 283 arch/sparc/kernel/ldc.c if (t == h) h 33 arch/sparc/mm/hugetlbpage.c struct hstate *h = hstate_file(filp); h 44 arch/sparc/mm/hugetlbpage.c info.align_mask = PAGE_MASK & ~huge_page_mask(h); h 64 arch/sparc/mm/hugetlbpage.c struct hstate *h = hstate_file(filp); h 76 arch/sparc/mm/hugetlbpage.c info.align_mask = PAGE_MASK & ~huge_page_mask(h); h 101 arch/sparc/mm/hugetlbpage.c struct hstate *h = hstate_file(file); h 109 arch/sparc/mm/hugetlbpage.c if (len & ~huge_page_mask(h)) h 121 arch/sparc/mm/hugetlbpage.c addr = ALIGN(addr, huge_page_size(h)); h 3 arch/x86/boot/code16gcc.h # code16gcc.h h 289 arch/x86/boot/compressed/eboot.c u32 w, h, depth, refresh; h 303 arch/x86/boot/compressed/eboot.c &w, &h, &depth, &refresh); h 306 arch/x86/boot/compressed/eboot.c height = h; h 30 arch/x86/crypto/poly1305_glue.c asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src, h 32 arch/x86/crypto/poly1305_glue.c asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r, h 35 arch/x86/crypto/poly1305_glue.c asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r, h 93 arch/x86/crypto/poly1305_glue.c poly1305_4block_avx2(dctx->h.h, src, dctx->r.r, blocks, h 106 arch/x86/crypto/poly1305_glue.c poly1305_2block_sse2(dctx->h.h, src, dctx->r.r, blocks, h 112 arch/x86/crypto/poly1305_glue.c poly1305_block_sse2(dctx->h.h, src, dctx->r.r, 1); h 18 arch/x86/include/asm/msr.h u32 h; h 339 arch/x86/include/asm/msr.h int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); h 340 arch/x86/include/asm/msr.h int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, 
u32 h); h 345 arch/x86/include/asm/msr.h int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); h 346 arch/x86/include/asm/msr.h int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); h 352 arch/x86/include/asm/msr.h static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) h 354 arch/x86/include/asm/msr.h rdmsr(msr_no, *l, *h); h 357 arch/x86/include/asm/msr.h static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) h 359 arch/x86/include/asm/msr.h wrmsr(msr_no, l, h); h 375 arch/x86/include/asm/msr.h rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h)); h 380 arch/x86/include/asm/msr.h wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h); h 383 arch/x86/include/asm/msr.h u32 *l, u32 *h) h 385 arch/x86/include/asm/msr.h return rdmsr_safe(msr_no, l, h); h 387 arch/x86/include/asm/msr.h static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) h 389 arch/x86/include/asm/msr.h return wrmsr_safe(msr_no, l, h); h 1275 arch/x86/kernel/apic/apic.c unsigned int l, h; h 1277 arch/x86/kernel/apic/apic.c rdmsr(MSR_IA32_APICBASE, l, h); h 1279 arch/x86/kernel/apic/apic.c wrmsr(MSR_IA32_APICBASE, l, h); h 1991 arch/x86/kernel/apic/apic.c u32 features, h, l; h 2007 arch/x86/kernel/apic/apic.c rdmsr(MSR_IA32_APICBASE, l, h); h 2018 arch/x86/kernel/apic/apic.c u32 h, l; h 2029 arch/x86/kernel/apic/apic.c rdmsr(MSR_IA32_APICBASE, l, h); h 2034 arch/x86/kernel/apic/apic.c wrmsr(MSR_IA32_APICBASE, l, h); h 2656 arch/x86/kernel/apic/apic.c unsigned int l, h; h 2684 arch/x86/kernel/apic/apic.c rdmsr(MSR_IA32_APICBASE, l, h); h 2687 arch/x86/kernel/apic/apic.c wrmsr(MSR_IA32_APICBASE, l, h); h 119 arch/x86/kernel/cpu/amd.c u32 l, h; h 166 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K6_WHCR, l, h); h 172 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K6_WHCR, l, h); h 187 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K6_WHCR, l, h); h 193 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K6_WHCR, l, h); h 213 arch/x86/kernel/cpu/amd.c u32 l, h; h 234 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K7_CLK_CTL, l, h); h 238 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); h 64 arch/x86/kernel/cpu/hypervisor.c const struct hypervisor_x86 *h = NULL, * const *p; h 74 arch/x86/kernel/cpu/hypervisor.c h = *p; h 78 arch/x86/kernel/cpu/hypervisor.c if (h) h 79 arch/x86/kernel/cpu/hypervisor.c pr_info("Hypervisor detected: %s\n", h->name); h 81 arch/x86/kernel/cpu/hypervisor.c return h; h 97 arch/x86/kernel/cpu/hypervisor.c const struct hypervisor_x86 *h; h 99 arch/x86/kernel/cpu/hypervisor.c h = detect_hypervisor_vendor(); h 101 arch/x86/kernel/cpu/hypervisor.c if (!h) h 104 arch/x86/kernel/cpu/hypervisor.c copy_array(&h->init, &x86_init.hyper, sizeof(h->init)); h 105 arch/x86/kernel/cpu/hypervisor.c copy_array(&h->runtime, &x86_platform.hyper, sizeof(h->runtime)); h 107 arch/x86/kernel/cpu/hypervisor.c x86_hyper_type = h->type; h 302 arch/x86/kernel/cpu/mce/inject.c u32 l, h; h 305 arch/x86/kernel/cpu/mce/inject.c err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h); h 313 arch/x86/kernel/cpu/mce/inject.c err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h); h 48 arch/x86/kernel/cpu/mce/p5.c u32 l, h; h 63 arch/x86/kernel/cpu/mce/p5.c rdmsr(MSR_IA32_P5_MC_ADDR, l, h); h 64 arch/x86/kernel/cpu/mce/p5.c rdmsr(MSR_IA32_P5_MC_TYPE, l, h); h 432 arch/x86/kernel/cpu/mce/therm_throt.c u32 l, h; h 442 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); h 444 arch/x86/kernel/cpu/mce/therm_throt.c h = lvtthmr_init; h 455 arch/x86/kernel/cpu/mce/therm_throt.c if ((h & 
APIC_DM_FIXED_MASK) != APIC_DM_FIXED) h 459 arch/x86/kernel/cpu/mce/therm_throt.c if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { h 468 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_THERM2_CTL, l, h); h 476 arch/x86/kernel/cpu/mce/therm_throt.c h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED; h 477 arch/x86/kernel/cpu/mce/therm_throt.c apic_write(APIC_LVTTHMR, h); h 479 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); h 483 arch/x86/kernel/cpu/mce/therm_throt.c | THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h); h 487 arch/x86/kernel/cpu/mce/therm_throt.c | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h); h 490 arch/x86/kernel/cpu/mce/therm_throt.c l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h); h 493 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); h 498 arch/x86/kernel/cpu/mce/therm_throt.c & ~PACKAGE_THERM_INT_PLN_ENABLE, h); h 503 arch/x86/kernel/cpu/mce/therm_throt.c | PACKAGE_THERM_INT_PLN_ENABLE), h); h 507 arch/x86/kernel/cpu/mce/therm_throt.c | PACKAGE_THERM_INT_HIGH_ENABLE), h); h 512 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); h 513 arch/x86/kernel/cpu/mce/therm_throt.c wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); h 831 arch/x86/kernel/cpu/mtrr/cleanup.c u32 l, h; h 839 arch/x86/kernel/cpu/mtrr/cleanup.c if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) h 202 arch/x86/kernel/cpu/resctrl/core.c u32 l, h, max_cbm = BIT_MASK(20) - 1; h 207 arch/x86/kernel/cpu/resctrl/core.c rdmsr(MSR_IA32_L3_CBM_BASE, l, h); h 141 arch/x86/kernel/hpet.c u32 i, id, period, cfg, status, channels, l, h; h 154 arch/x86/kernel/hpet.c h = hpet_readl(HPET_COUNTER+4); h 155 arch/x86/kernel/hpet.c pr_info("COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h); h 161 arch/x86/kernel/hpet.c h = hpet_readl(HPET_Tn_CFG(i)+4); h 162 arch/x86/kernel/hpet.c pr_info("T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", i, l, h); h 165 arch/x86/kernel/hpet.c h = hpet_readl(HPET_Tn_CMP(i)+4); h 166 arch/x86/kernel/hpet.c pr_info("T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", i, l, h); h 169 arch/x86/kernel/hpet.c h = hpet_readl(HPET_Tn_ROUTE(i)+4); h 170 arch/x86/kernel/hpet.c pr_info("T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", i, l, h); h 503 arch/x86/kvm/svm.c struct vmcb_control_area *c, *h; h 512 arch/x86/kvm/svm.c h = &svm->nested.hsave->control; h 515 arch/x86/kvm/svm.c c->intercept_cr = h->intercept_cr | g->intercept_cr; h 516 arch/x86/kvm/svm.c c->intercept_dr = h->intercept_dr | g->intercept_dr; h 517 arch/x86/kvm/svm.c c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; h 518 arch/x86/kvm/svm.c c->intercept = h->intercept | g->intercept; h 3964 arch/x86/kvm/vmx/nested.c struct vmx_msr_entry g, h; h 4034 arch/x86/kvm/vmx/nested.c gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); h 4035 arch/x86/kvm/vmx/nested.c if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { h 4041 arch/x86/kvm/vmx/nested.c if (h.index != g.index) h 4043 arch/x86/kvm/vmx/nested.c if (h.value == g.value) h 4046 arch/x86/kvm/vmx/nested.c if (nested_vmx_load_msr_check(vcpu, &h)) { h 4049 arch/x86/kvm/vmx/nested.c __func__, j, h.index, h.reserved); h 4053 arch/x86/kvm/vmx/nested.c if (kvm_set_msr(vcpu, h.index, h.value)) { h 4056 arch/x86/kvm/vmx/nested.c __func__, j, h.index, h.value); h 19 arch/x86/lib/msr-smp.c rdmsr(rv->msr_no, reg->l, reg->h); h 33 arch/x86/lib/msr-smp.c wrmsr(rv->msr_no, reg->l, reg->h); h 36 arch/x86/lib/msr-smp.c int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) h 46 
arch/x86/lib/msr-smp.c *h = rv.reg.h; h 67 arch/x86/lib/msr-smp.c int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) h 76 arch/x86/lib/msr-smp.c rv.reg.h = h; h 158 arch/x86/lib/msr-smp.c rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h); h 166 arch/x86/lib/msr-smp.c rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h); h 169 arch/x86/lib/msr-smp.c int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) h 188 arch/x86/lib/msr-smp.c *h = rv.msr.reg.h; h 194 arch/x86/lib/msr-smp.c int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) h 203 arch/x86/lib/msr-smp.c rv.reg.h = h; h 20 arch/x86/math-emu/reg_constant.c #define MAKE_REG(s, e, l, h) { l, h, \ h 84 arch/x86/mm/hugetlbpage.c struct hstate *h = hstate_file(file); h 98 arch/x86/mm/hugetlbpage.c info.align_mask = PAGE_MASK & ~huge_page_mask(h); h 107 arch/x86/mm/hugetlbpage.c struct hstate *h = hstate_file(file); h 122 arch/x86/mm/hugetlbpage.c info.align_mask = PAGE_MASK & ~huge_page_mask(h); h 147 arch/x86/mm/hugetlbpage.c struct hstate *h = hstate_file(file); h 151 arch/x86/mm/hugetlbpage.c if (len & ~huge_page_mask(h)) h 169 arch/x86/mm/hugetlbpage.c addr &= huge_page_mask(h); h 823 arch/x86/pci/irq.c struct irq_router_handler *h; h 850 arch/x86/pci/irq.c for (h = pirq_routers; h->vendor; h++) { h 852 arch/x86/pci/irq.c if (rt->rtr_vendor == h->vendor && h 853 arch/x86/pci/irq.c h->probe(r, pirq_router_dev, rt->rtr_device)) h 856 arch/x86/pci/irq.c if (pirq_router_dev->vendor == h->vendor && h 857 arch/x86/pci/irq.c h->probe(r, pirq_router_dev, pirq_router_dev->device)) h 260 arch/x86/xen/enlighten_hvm.c struct x86_hyper_init *h = &x86_hyper_xen_hvm.init; h 278 arch/x86/xen/enlighten_hvm.c h->init_platform = x86_init_noop; h 279 arch/x86/xen/enlighten_hvm.c h->x2apic_available = bool_x86_init_noop; h 280 arch/x86/xen/enlighten_hvm.c h->init_mem_mapping = x86_init_noop; h 281 arch/x86/xen/enlighten_hvm.c h->init_after_bootmem = x86_init_noop; h 282 arch/x86/xen/enlighten_hvm.c h->guest_late_init = xen_hvm_guest_late_init; h 23 block/blk-softirq.c static __latent_entropy void blk_done_softirq(struct softirq_action *h) h 41 crypto/poly1305_generic.c poly1305_core_init(&dctx->h); h 114 crypto/poly1305_generic.c h0 = state->h[0]; h 115 crypto/poly1305_generic.c h1 = state->h[1]; h 116 crypto/poly1305_generic.c h2 = state->h[2]; h 117 crypto/poly1305_generic.c h3 = state->h[3]; h 118 crypto/poly1305_generic.c h4 = state->h[4]; h 151 crypto/poly1305_generic.c state->h[0] = h0; h 152 crypto/poly1305_generic.c state->h[1] = h1; h 153 crypto/poly1305_generic.c state->h[2] = h2; h 154 crypto/poly1305_generic.c state->h[3] = h3; h 155 crypto/poly1305_generic.c state->h[4] = h4; h 177 crypto/poly1305_generic.c poly1305_blocks_internal(&dctx->h, &dctx->r, h 223 crypto/poly1305_generic.c h0 = state->h[0]; h 224 crypto/poly1305_generic.c h1 = state->h[1]; h 225 crypto/poly1305_generic.c h2 = state->h[2]; h 226 crypto/poly1305_generic.c h3 = state->h[3]; h 227 crypto/poly1305_generic.c h4 = state->h[4]; h 280 crypto/poly1305_generic.c poly1305_core_emit(&dctx->h, digest); h 101 crypto/sha512_generic.c u64 a, b, c, d, e, f, g, h, t1, t2; h 108 crypto/sha512_generic.c e=state[4]; f=state[5]; g=state[6]; h=state[7]; h 126 crypto/sha512_generic.c t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)]; h 127 crypto/sha512_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; h 129 crypto/sha512_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; h 131 crypto/sha512_generic.c t2 = e0(g) + Maj(g,h,a); 
b+=t1; f=t1+t2; h 133 crypto/sha512_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; h 135 crypto/sha512_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; h 136 crypto/sha512_generic.c t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5]; h 138 crypto/sha512_generic.c t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6]; h 140 crypto/sha512_generic.c t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7]; h 145 crypto/sha512_generic.c state[4] += e; state[5] += f; state[6] += g; state[7] += h; h 148 crypto/sha512_generic.c a = b = c = d = e = f = g = h = t1 = t2 = 0; h 78 crypto/sm3_generic.c u32 a, b, c, d, e, f, g, h; h 88 crypto/sm3_generic.c h = m[7]; h 99 crypto/sm3_generic.c tt2 = gg(i, e, f, g) + h + ss1 + *w; h 106 crypto/sm3_generic.c h = g; h 119 crypto/sm3_generic.c m[7] = h ^ m[7]; h 121 crypto/sm3_generic.c a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0; h 917 crypto/streebog_generic.c ctx->h.qword[i] = cpu_to_le64(0x0101010101010101ULL); h 951 crypto/streebog_generic.c static void streebog_g(struct streebog_uint512 *h, h 958 crypto/streebog_generic.c streebog_xlps(h, N, &data); h 971 crypto/streebog_generic.c streebog_xor(&data, h, &data); h 972 crypto/streebog_generic.c streebog_xor(&data, m, h); h 981 crypto/streebog_generic.c streebog_g(&ctx->h, &ctx->N, &m); h 994 crypto/streebog_generic.c streebog_g(&ctx->h, &ctx->N, &ctx->m); h 997 crypto/streebog_generic.c streebog_g(&ctx->h, &buffer0, &ctx->N); h 998 crypto/streebog_generic.c streebog_g(&ctx->h, &buffer0, &ctx->Sigma); h 999 crypto/streebog_generic.c memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512)); h 78 drivers/acpi/acpi_configfs.c struct acpi_table_header *h = get_header(cfg); h 80 drivers/acpi/acpi_configfs.c if (!h) h 84 drivers/acpi/acpi_configfs.c memcpy(data, h, h->length); h 86 drivers/acpi/acpi_configfs.c return h->length; h 100 drivers/acpi/acpi_configfs.c struct acpi_table_header *h = get_header(cfg); h 102 drivers/acpi/acpi_configfs.c if (!h) h 105 drivers/acpi/acpi_configfs.c return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->signature); h 110 drivers/acpi/acpi_configfs.c struct acpi_table_header *h = get_header(cfg); h 112 drivers/acpi/acpi_configfs.c if (!h) h 115 drivers/acpi/acpi_configfs.c return sprintf(str, "%d\n", h->length); h 120 drivers/acpi/acpi_configfs.c struct acpi_table_header *h = get_header(cfg); h 122 drivers/acpi/acpi_configfs.c if (!h) h 125 drivers/acpi/acpi_configfs.c return sprintf(str, "%d\n", h->revision); h 130 drivers/acpi/acpi_configfs.c struct acpi_table_header *h = get_header(cfg); h 132 drivers/acpi/acpi_configfs.c if (!h) h 135 drivers/acpi/acpi_configfs.c return sprintf(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id); h 140 drivers/acpi/acpi_configfs.c struct acpi_table_header *h = get_header(cfg); h 142 drivers/acpi/acpi_configfs.c if (!h) h 145 drivers/acpi/acpi_configfs.c return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id); h 150 drivers/acpi/acpi_configfs.c struct acpi_table_header *h = get_header(cfg); h 152 drivers/acpi/acpi_configfs.c if (!h) h 155 drivers/acpi/acpi_configfs.c return sprintf(str, "%d\n", h->oem_revision); h 161 drivers/acpi/acpi_configfs.c struct acpi_table_header *h = get_header(cfg); h 163 drivers/acpi/acpi_configfs.c if (!h) h 166 drivers/acpi/acpi_configfs.c return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->asl_compiler_id); h 172 drivers/acpi/acpi_configfs.c struct acpi_table_header *h = get_header(cfg); h 174 drivers/acpi/acpi_configfs.c if (!h) h 177 
drivers/acpi/acpi_configfs.c return sprintf(str, "%d\n", h->asl_compiler_revision); h 464 drivers/acpi/numa.c static int acpi_get_pxm(acpi_handle h) h 469 drivers/acpi/numa.c acpi_handle phandle = h; h 30 drivers/acpi/spcr.c static bool qdf2400_erratum_44_present(struct acpi_table_header *h) h 32 drivers/acpi/spcr.c if (memcmp(h->oem_id, "QCOM ", ACPI_OEM_ID_SIZE)) h 35 drivers/acpi/spcr.c if (!memcmp(h->oem_table_id, "QDF2432 ", ACPI_OEM_TABLE_ID_SIZE)) h 38 drivers/acpi/spcr.c if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) && h 39 drivers/acpi/spcr.c h->oem_revision == 1) h 28 drivers/acpi/utils.c acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s) h 33 drivers/acpi/utils.c acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer); h 524 drivers/base/regmap/regmap-irq.c static int regmap_irq_map(struct irq_domain *h, unsigned int virq, h 527 drivers/base/regmap/regmap-irq.c struct regmap_irq_chip_data *data = h->host_data; h 129 drivers/block/aoe/aoecmd.c aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h) h 133 drivers/block/aoe/aoecmd.c memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); h 134 drivers/block/aoe/aoecmd.c memcpy(h->dst, t->addr, sizeof h->dst); h 135 drivers/block/aoe/aoecmd.c h->type = __constant_cpu_to_be16(ETH_P_AOE); h 136 drivers/block/aoe/aoecmd.c h->verfl = AOE_HVER; h 137 drivers/block/aoe/aoecmd.c h->major = cpu_to_be16(d->aoemajor); h 138 drivers/block/aoe/aoecmd.c h->minor = d->aoeminor; h 139 drivers/block/aoe/aoecmd.c h->cmd = AOECMD_ATA; h 140 drivers/block/aoe/aoecmd.c h->tag = cpu_to_be32(host_tag); h 321 drivers/block/aoe/aoecmd.c struct aoe_hdr *h; h 327 drivers/block/aoe/aoecmd.c h = (struct aoe_hdr *) skb_mac_header(skb); h 328 drivers/block/aoe/aoecmd.c ah = (struct aoe_atahdr *) (h + 1); h 329 drivers/block/aoe/aoecmd.c skb_put(skb, sizeof(*h) + sizeof(*ah)); h 330 drivers/block/aoe/aoecmd.c memset(h, 0, skb->len); h 336 drivers/block/aoe/aoecmd.c f->tag = aoehdr_atainit(t->d, t, h); h 415 drivers/block/aoe/aoecmd.c struct aoe_hdr *h; h 426 drivers/block/aoe/aoecmd.c skb = new_skb(sizeof *h + sizeof *ch); h 431 drivers/block/aoe/aoecmd.c skb_put(skb, sizeof *h + sizeof *ch); h 434 drivers/block/aoe/aoecmd.c h = (struct aoe_hdr *) skb_mac_header(skb); h 435 drivers/block/aoe/aoecmd.c memset(h, 0, sizeof *h + sizeof *ch); h 437 drivers/block/aoe/aoecmd.c memset(h->dst, 0xff, sizeof h->dst); h 438 drivers/block/aoe/aoecmd.c memcpy(h->src, ifp->dev_addr, sizeof h->src); h 439 drivers/block/aoe/aoecmd.c h->type = __constant_cpu_to_be16(ETH_P_AOE); h 440 drivers/block/aoe/aoecmd.c h->verfl = AOE_HVER; h 441 drivers/block/aoe/aoecmd.c h->major = cpu_to_be16(aoemajor); h 442 drivers/block/aoe/aoecmd.c h->minor = aoeminor; h 443 drivers/block/aoe/aoecmd.c h->cmd = AOECMD_CFG; h 456 drivers/block/aoe/aoecmd.c struct aoe_hdr *h; h 470 drivers/block/aoe/aoecmd.c h = (struct aoe_hdr *) skb_mac_header(skb); h 477 drivers/block/aoe/aoecmd.c h->src, h->dst, t->nout); h 483 drivers/block/aoe/aoecmd.c h->tag = cpu_to_be32(n); h 484 drivers/block/aoe/aoecmd.c memcpy(h->dst, t->addr, sizeof h->dst); h 485 drivers/block/aoe/aoecmd.c memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); h 1305 drivers/block/aoe/aoecmd.c struct aoe_hdr *h; h 1312 drivers/block/aoe/aoecmd.c h = (struct aoe_hdr *) skb->data; h 1313 drivers/block/aoe/aoecmd.c aoemajor = be16_to_cpu(get_unaligned(&h->major)); h 1314 drivers/block/aoe/aoecmd.c d = aoedev_by_aoeaddr(aoemajor, h->minor, 0); h 1318 drivers/block/aoe/aoecmd.c aoemajor, h->minor); h 1325 
drivers/block/aoe/aoecmd.c n = be32_to_cpu(get_unaligned(&h->tag)); h 1343 drivers/block/aoe/aoecmd.c get_unaligned_be16(&h->major), h 1344 drivers/block/aoe/aoecmd.c h->minor, h 1345 drivers/block/aoe/aoecmd.c get_unaligned_be32(&h->tag), h 1347 drivers/block/aoe/aoecmd.c h->src, h 1348 drivers/block/aoe/aoecmd.c h->dst); h 1379 drivers/block/aoe/aoecmd.c struct aoe_hdr *h; h 1393 drivers/block/aoe/aoecmd.c h = (struct aoe_hdr *) skb_mac_header(skb); h 1394 drivers/block/aoe/aoecmd.c ah = (struct aoe_atahdr *) (h+1); h 1395 drivers/block/aoe/aoecmd.c skb_put(skb, sizeof *h + sizeof *ah); h 1396 drivers/block/aoe/aoecmd.c memset(h, 0, skb->len); h 1397 drivers/block/aoe/aoecmd.c f->tag = aoehdr_atainit(d, t, h); h 1528 drivers/block/aoe/aoecmd.c struct aoe_hdr *h; h 1537 drivers/block/aoe/aoecmd.c h = (struct aoe_hdr *) skb_mac_header(skb); h 1538 drivers/block/aoe/aoecmd.c ch = (struct aoe_cfghdr *) (h+1); h 1544 drivers/block/aoe/aoecmd.c aoemajor = get_unaligned_be16(&h->major); h 1552 drivers/block/aoe/aoecmd.c aoemajor, (int) h->minor); h 1555 drivers/block/aoe/aoecmd.c if (h->minor == 0xff) { h 1557 drivers/block/aoe/aoecmd.c aoemajor, (int) h->minor); h 1565 drivers/block/aoe/aoecmd.c d = aoedev_by_aoeaddr(aoemajor, h->minor, 1); h 1573 drivers/block/aoe/aoecmd.c t = gettgt(d, h->src); h 1579 drivers/block/aoe/aoecmd.c t = addtgt(d, h->src, n); h 133 drivers/block/aoe/aoenet.c struct aoe_hdr *h; h 147 drivers/block/aoe/aoenet.c sn = sizeof(*h) + sizeof(*ah); h 153 drivers/block/aoe/aoenet.c h = (struct aoe_hdr *) skb->data; h 154 drivers/block/aoe/aoenet.c n = get_unaligned_be32(&h->tag); h 155 drivers/block/aoe/aoenet.c if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31)) h 158 drivers/block/aoe/aoenet.c if (h->verfl & AOEFL_ERR) { h 159 drivers/block/aoe/aoenet.c n = h->err; h 166 drivers/block/aoe/aoenet.c get_unaligned_be16(&h->major), h 167 drivers/block/aoe/aoenet.c h->minor, skb->dev->name, h 168 drivers/block/aoe/aoenet.c h->err, aoe_errlist[n]); h 172 drivers/block/aoe/aoenet.c switch (h->cmd) { h 181 drivers/block/aoe/aoenet.c if (h->cmd >= AOECMD_VEND_MIN) h 183 drivers/block/aoe/aoenet.c pr_info("aoe: unknown AoE command type 0x%02x\n", h->cmd); h 573 drivers/block/drbd/drbd_main.c static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size) h 575 drivers/block/drbd/drbd_main.c h->magic = cpu_to_be32(DRBD_MAGIC); h 576 drivers/block/drbd/drbd_main.c h->command = cpu_to_be16(cmd); h 577 drivers/block/drbd/drbd_main.c h->length = cpu_to_be16(size); h 581 drivers/block/drbd/drbd_main.c static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size) h 583 drivers/block/drbd/drbd_main.c h->magic = cpu_to_be16(DRBD_MAGIC_BIG); h 584 drivers/block/drbd/drbd_main.c h->command = cpu_to_be16(cmd); h 585 drivers/block/drbd/drbd_main.c h->length = cpu_to_be32(size); h 589 drivers/block/drbd/drbd_main.c static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd, h 592 drivers/block/drbd/drbd_main.c h->magic = cpu_to_be32(DRBD_MAGIC_100); h 593 drivers/block/drbd/drbd_main.c h->volume = cpu_to_be16(vnr); h 594 drivers/block/drbd/drbd_main.c h->command = cpu_to_be16(cmd); h 595 drivers/block/drbd/drbd_main.c h->length = cpu_to_be32(size); h 596 drivers/block/drbd/drbd_main.c h->pad = 0; h 926 drivers/block/drbd/drbd_receiver.c int vnr, timeout, h; h 1060 drivers/block/drbd/drbd_receiver.c h = drbd_do_features(connection); h 1061 drivers/block/drbd/drbd_receiver.c if (h <= 0) h 1062 
drivers/block/drbd/drbd_receiver.c return h; h 1141 drivers/block/drbd/drbd_receiver.c return h; h 1159 drivers/block/drbd/drbd_receiver.c struct p_header100 *h = header; h 1160 drivers/block/drbd/drbd_receiver.c if (h->pad != 0) { h 1164 drivers/block/drbd/drbd_receiver.c pi->vnr = be16_to_cpu(h->volume); h 1165 drivers/block/drbd/drbd_receiver.c pi->cmd = be16_to_cpu(h->command); h 1166 drivers/block/drbd/drbd_receiver.c pi->size = be32_to_cpu(h->length); h 1169 drivers/block/drbd/drbd_receiver.c struct p_header95 *h = header; h 1170 drivers/block/drbd/drbd_receiver.c pi->cmd = be16_to_cpu(h->command); h 1171 drivers/block/drbd/drbd_receiver.c pi->size = be32_to_cpu(h->length); h 1175 drivers/block/drbd/drbd_receiver.c struct p_header80 *h = header; h 1176 drivers/block/drbd/drbd_receiver.c pi->cmd = be16_to_cpu(h->command); h 1177 drivers/block/drbd/drbd_receiver.c pi->size = be16_to_cpu(h->length); h 1838 drivers/block/drbd/drbd_receiver.c static void drbd_csum_ee_size(struct crypto_shash *h, h 1844 drivers/block/drbd/drbd_receiver.c drbd_csum_ee(h, r, d); h 5593 drivers/block/drbd/drbd_receiver.c int h; h 5598 drivers/block/drbd/drbd_receiver.c h = conn_connect(connection); h 5599 drivers/block/drbd/drbd_receiver.c if (h == 0) { h 5603 drivers/block/drbd/drbd_receiver.c if (h == -1) { h 5607 drivers/block/drbd/drbd_receiver.c } while (h == 0); h 5609 drivers/block/drbd/drbd_receiver.c if (h > 0) { h 39 drivers/block/paride/bpck.c #define j44(l,h) (((l>>3)&0x7)|((l>>4)&0x8)|((h<<1)&0x70)|(h&0x80)) h 50 drivers/block/paride/bpck.c { int r, l, h; h 59 drivers/block/paride/bpck.c h = r1(); h 60 drivers/block/paride/bpck.c return j44(l,h); h 64 drivers/block/paride/bpck.c t2(4); h = r0(); h 66 drivers/block/paride/bpck.c return h; h 71 drivers/block/paride/bpck.c h = r4(); h 73 drivers/block/paride/bpck.c return h; h 151 drivers/block/paride/bpck.c { int i, l, h; h 159 drivers/block/paride/bpck.c t2(4); h = r1(); h 160 drivers/block/paride/bpck.c buf[i] = j44(l,h); h 280 drivers/block/paride/bpck.c { int i, e, l, h, om; h 292 drivers/block/paride/bpck.c t2(4); h = r1(); h 293 drivers/block/paride/bpck.c buf[i] = j44(l,h); h 47 drivers/block/paride/comm.c { int l, h, r; h 54 drivers/block/paride/comm.c w2(6); l = r1(); w0(0x80); h = r1(); w2(4); h 55 drivers/block/paride/comm.c return j44(l,h); h 58 drivers/block/paride/comm.c w0(0); w2(0x26); h = r0(); w2(4); h 59 drivers/block/paride/comm.c return h; h 64 drivers/block/paride/comm.c w2(0x24); h = r4(); w2(4); h 65 drivers/block/paride/comm.c return h; h 110 drivers/block/paride/comm.c { int i, l, h; h 117 drivers/block/paride/comm.c w0(0x80); h = r1(); w2(4); h 118 drivers/block/paride/comm.c buf[i] = j44(l,h); h 43 drivers/block/paride/friq.c #define j44(l,h) (((l>>4)&0x0f)|(h&0xf0)) h 53 drivers/block/paride/friq.c { int h,l,r; h 59 drivers/block/paride/friq.c w2(4); h = r1(); h 62 drivers/block/paride/friq.c return j44(l,h); h 79 drivers/block/paride/friq.c { int h, l, k, ph; h 86 drivers/block/paride/friq.c w2(4); h = r1(); h 87 drivers/block/paride/friq.c buf[k] = j44(l,h); h 39 drivers/block/paride/frpw.c #define j44(l,h) (((l>>4)&0x0f)|(h&0xf0)) h 49 drivers/block/paride/frpw.c { int h,l,r; h 56 drivers/block/paride/frpw.c w2(4); h = r1(); h 59 drivers/block/paride/frpw.c return j44(l,h); h 76 drivers/block/paride/frpw.c { int h, l, k, ph; h 83 drivers/block/paride/frpw.c w2(4); h = r1(); h 84 drivers/block/paride/frpw.c buf[k] = j44(l,h); h 38 drivers/block/paride/on20.c { int h,l, r ; h 47 drivers/block/paride/on20.c w2(4); 
w2(6); h = r1(); h 49 drivers/block/paride/on20.c return j44(l,h); h 89 drivers/block/paride/on20.c { int k, l, h; h 98 drivers/block/paride/on20.c w2(6); h = r1(); w2(4); h 99 drivers/block/paride/on20.c buf[k] = j44(l,h); h 327 drivers/block/paride/pd.c static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func) h 329 drivers/block/paride/pd.c write_reg(disk, 6, DRIVE(disk) + h); h 342 drivers/block/paride/pd.c int c1, c0, h, s; h 348 drivers/block/paride/pd.c h = ((block >>= 8) & 15) + 0x40; h 351 drivers/block/paride/pd.c h = (block /= disk->sectors) % disk->heads; h 355 drivers/block/paride/pd.c pd_send_command(disk, count, s, h, c0, c1, func); h 105 drivers/char/ipmi/ipmi_si_hotmod.c struct ipmi_plat_data *h) h 111 drivers/char/ipmi/ipmi_si_hotmod.c h->iftype = IPMI_PLAT_IF_SI; h 120 drivers/char/ipmi/ipmi_si_hotmod.c h->type = ival; h 125 drivers/char/ipmi/ipmi_si_hotmod.c h->space = ival; h 132 drivers/char/ipmi/ipmi_si_hotmod.c rv = kstrtoul(curr, 0, &h->addr); h 150 drivers/char/ipmi/ipmi_si_hotmod.c rv = check_hotmod_int_op(curr, o, "rsp", &h->regspacing); h 155 drivers/char/ipmi/ipmi_si_hotmod.c rv = check_hotmod_int_op(curr, o, "rsi", &h->regsize); h 160 drivers/char/ipmi/ipmi_si_hotmod.c rv = check_hotmod_int_op(curr, o, "rsh", &h->regshift); h 165 drivers/char/ipmi/ipmi_si_hotmod.c rv = check_hotmod_int_op(curr, o, "irq", &h->irq); h 170 drivers/char/ipmi/ipmi_si_hotmod.c rv = check_hotmod_int_op(curr, o, "ipmb", &h->slave_addr); h 180 drivers/char/ipmi/ipmi_si_hotmod.c h->addr_source = SI_HOTMOD; h 190 drivers/char/ipmi/ipmi_si_hotmod.c struct ipmi_plat_data h; h 214 drivers/char/ipmi/ipmi_si_hotmod.c memset(&h, 0, sizeof(h)); h 215 drivers/char/ipmi/ipmi_si_hotmod.c rv = parse_hotmod_str(curr, &op, &h); h 222 drivers/char/ipmi/ipmi_si_hotmod.c &h); h 226 drivers/char/ipmi/ipmi_si_hotmod.c dev = ipmi_si_remove_by_data(h.space, h.type, h.addr); h 112 drivers/char/rtc.c #define hpet_register_irq_handler(h) ({ 0; }) h 113 drivers/char/rtc.c #define hpet_unregister_irq_handler(h) ({ 0; }) h 22 drivers/clk/qcom/clk-hfpll.c struct clk_hfpll *h = to_clk_hfpll(hw); h 23 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; h 24 drivers/clk/qcom/clk-hfpll.c struct regmap *regmap = h->clkr.regmap; h 26 drivers/clk/qcom/clk-hfpll.c if (likely(h->init_done)) h 50 drivers/clk/qcom/clk-hfpll.c h->init_done = true; h 55 drivers/clk/qcom/clk-hfpll.c struct clk_hfpll *h = to_clk_hfpll(hw); h 56 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; h 57 drivers/clk/qcom/clk-hfpll.c struct regmap *regmap = h->clkr.regmap; h 91 drivers/clk/qcom/clk-hfpll.c struct clk_hfpll *h = to_clk_hfpll(hw); h 92 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; h 93 drivers/clk/qcom/clk-hfpll.c struct regmap *regmap = h->clkr.regmap; h 96 drivers/clk/qcom/clk-hfpll.c spin_lock_irqsave(&h->lock, flags); h 100 drivers/clk/qcom/clk-hfpll.c spin_unlock_irqrestore(&h->lock, flags); h 105 drivers/clk/qcom/clk-hfpll.c static void __clk_hfpll_disable(struct clk_hfpll *h) h 107 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; h 108 drivers/clk/qcom/clk-hfpll.c struct regmap *regmap = h->clkr.regmap; h 120 drivers/clk/qcom/clk-hfpll.c struct clk_hfpll *h = to_clk_hfpll(hw); h 123 drivers/clk/qcom/clk-hfpll.c spin_lock_irqsave(&h->lock, flags); h 124 drivers/clk/qcom/clk-hfpll.c __clk_hfpll_disable(h); h 125 drivers/clk/qcom/clk-hfpll.c spin_unlock_irqrestore(&h->lock, flags); h 131 drivers/clk/qcom/clk-hfpll.c struct clk_hfpll 
*h = to_clk_hfpll(hw); h 132 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; h 151 drivers/clk/qcom/clk-hfpll.c struct clk_hfpll *h = to_clk_hfpll(hw); h 152 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; h 153 drivers/clk/qcom/clk-hfpll.c struct regmap *regmap = h->clkr.regmap; h 160 drivers/clk/qcom/clk-hfpll.c spin_lock_irqsave(&h->lock, flags); h 164 drivers/clk/qcom/clk-hfpll.c __clk_hfpll_disable(h); h 181 drivers/clk/qcom/clk-hfpll.c spin_unlock_irqrestore(&h->lock, flags); h 189 drivers/clk/qcom/clk-hfpll.c struct clk_hfpll *h = to_clk_hfpll(hw); h 190 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; h 191 drivers/clk/qcom/clk-hfpll.c struct regmap *regmap = h->clkr.regmap; h 201 drivers/clk/qcom/clk-hfpll.c struct clk_hfpll *h = to_clk_hfpll(hw); h 202 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; h 203 drivers/clk/qcom/clk-hfpll.c struct regmap *regmap = h->clkr.regmap; h 225 drivers/clk/qcom/clk-hfpll.c struct clk_hfpll *h = to_clk_hfpll(hw); h 226 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; h 227 drivers/clk/qcom/clk-hfpll.c struct regmap *regmap = h->clkr.regmap; h 10 drivers/clk/qcom/clk-rcg.h #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } h 28 drivers/clk/qcom/gcc-sdm660.c #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } h 54 drivers/clk/qcom/hfpll.c struct clk_hfpll *h; h 61 drivers/clk/qcom/hfpll.c h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL); h 62 drivers/clk/qcom/hfpll.c if (!h) h 78 drivers/clk/qcom/hfpll.c h->d = &hdata; h 79 drivers/clk/qcom/hfpll.c h->clkr.hw.init = &init; h 80 drivers/clk/qcom/hfpll.c spin_lock_init(&h->lock); h 82 drivers/clk/qcom/hfpll.c return devm_clk_register_regmap(&pdev->dev, &h->clkr); h 61 drivers/clocksource/arc_timer.c u32 l, h; h 83 drivers/clocksource/arc_timer.c h = read_aux_reg(ARC_REG_MCIP_READBACK); h 87 drivers/clocksource/arc_timer.c return (((u64)h) << 32) | l; h 131 drivers/clocksource/arc_timer.c u32 l, h; h 141 drivers/clocksource/arc_timer.c h = read_aux_reg(AUX_RTC_HIGH); h 145 drivers/clocksource/arc_timer.c return (((u64)h) << 32) | l; h 54 drivers/cpufreq/amd_freq_sensitivity.c &actual.l, &actual.h); h 56 drivers/cpufreq/amd_freq_sensitivity.c &reference.l, &reference.h); h 57 drivers/cpufreq/amd_freq_sensitivity.c actual.h &= 0x00ffffff; h 58 drivers/cpufreq/amd_freq_sensitivity.c reference.h &= 0x00ffffff; h 54 drivers/cpufreq/p4-clockmod.c u32 l, h; h 59 drivers/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); h 68 drivers/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); h 71 drivers/cpufreq/p4-clockmod.c wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); h 82 drivers/cpufreq/p4-clockmod.c wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h); h 208 drivers/cpufreq/p4-clockmod.c u32 l, h; h 210 drivers/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); h 325 drivers/cpufreq/speedstep-centrino.c unsigned l, h; h 328 drivers/cpufreq/speedstep-centrino.c rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h); h 338 drivers/cpufreq/speedstep-centrino.c rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h); h 348 drivers/cpufreq/speedstep-centrino.c unsigned l, h; h 381 drivers/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); h 386 drivers/cpufreq/speedstep-centrino.c wrmsr(MSR_IA32_MISC_ENABLE, l, h); h 389 drivers/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); h 424 drivers/cpufreq/speedstep-centrino.c unsigned int 
msr, oldmsr = 0, h = 0, cpu = policy->cpu; h 466 drivers/cpufreq/speedstep-centrino.c rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); h 481 drivers/cpufreq/speedstep-centrino.c wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h); h 497 drivers/cpufreq/speedstep-centrino.c wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h); h 32 drivers/crypto/amcc/crypto4xx_alg.c u32 hdr_proc, u32 h, u32 c, u32 pad_type, h 41 drivers/crypto/amcc/crypto4xx_alg.c sa->sa_command_0.bf.hash_alg = h; h 257 drivers/crypto/nx/nx-842.c unsigned int groups, hdrsize, h; h 299 drivers/crypto/nx/nx-842.c h = !n && add_header ? hdrsize : 0; h 304 drivers/crypto/nx/nx-842.c ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h); h 37 drivers/dma/at_hdmac_regs.h #define AT_DMA_SYR(h) (0x1 << (h)) /* Synchronize handshake line h */ h 400 drivers/dma/dw/regs.h #define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) h 164 drivers/dma/ppc4xx/adma.c if (i && !cb->ops[i].h && !cb->ops[i].l) h 167 drivers/dma/ppc4xx/adma.c i, cb->ops[i].h, cb->ops[i].l); h 549 drivers/dma/ppc4xx/adma.c xor_hw_desc->ops[src_idx].h |= addrh; h 1851 drivers/dma/ppc4xx/adma.c xcb->ops[xor_arg_no].h |= mask; h 1862 drivers/dma/ppc4xx/adma.c xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE; h 1874 drivers/dma/ppc4xx/adma.c xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8); h 3377 drivers/dma/ppc4xx/adma.c if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) == h 3419 drivers/dma/ppc4xx/adma.c if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) == h 80 drivers/dma/ppc4xx/xor.h u32 h; h 174 drivers/firewire/core-topology.c struct list_head stack, *h; h 214 drivers/firewire/core-topology.c for (i = 0, h = &stack; i < child_port_count; i++) h 215 drivers/firewire/core-topology.c h = h->prev; h 220 drivers/firewire/core-topology.c child = fw_node(h); h 279 drivers/firewire/core-topology.c __list_del(h->prev, &stack); h 75 drivers/firewire/net.c #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) h 76 drivers/firewire/net.c #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) h 77 drivers/firewire/net.c #define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1) h 78 drivers/firewire/net.c #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) h 79 drivers/firewire/net.c #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) h 221 drivers/firewire/net.c struct fwnet_header *h; h 223 drivers/firewire/net.c h = skb_push(skb, sizeof(*h)); h 224 drivers/firewire/net.c put_unaligned_be16(type, &h->h_proto); h 227 drivers/firewire/net.c memset(h->h_dest, 0, net->addr_len); h 233 drivers/firewire/net.c memcpy(h->h_dest, daddr, net->addr_len); h 245 drivers/firewire/net.c struct fwnet_header *h; h 250 drivers/firewire/net.c h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h))); h 251 drivers/firewire/net.c h->h_proto = type; h 252 drivers/firewire/net.c memcpy(h->h_dest, neigh->ha, net->addr_len); h 218 drivers/firmware/arm_scmi/base.c int scmi_base_protocol_init(struct scmi_handle *h) h 224 drivers/firmware/arm_scmi/base.c const struct scmi_handle *handle = h; h 100 drivers/firmware/arm_scmi/common.h void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer); h 101 drivers/firmware/arm_scmi/common.h int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer); h 102 drivers/firmware/arm_scmi/common.h int scmi_do_xfer_with_response(const struct scmi_handle *h, h 104 drivers/firmware/arm_scmi/common.h int scmi_xfer_get_init(const struct scmi_handle *h, u8 
msg_id, u8 prot_id, h 109 drivers/firmware/arm_scmi/common.h int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version); h 113 drivers/firmware/arm_scmi/common.h int scmi_base_protocol_init(struct scmi_handle *h); h 142 drivers/firmware/arm_scmi/driver.c #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle) h 112 drivers/firmware/efi/earlycon.c static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h) h 121 drivers/firmware/efi/earlycon.c s8 = *(src + h); h 145 drivers/firmware/efi/earlycon.c unsigned int h, count = 0; h 157 drivers/firmware/efi/earlycon.c for (h = 0; h < font->height; h++) { h 160 drivers/firmware/efi/earlycon.c dst = efi_earlycon_map((efi_y + h) * len, len); h 169 drivers/firmware/efi/earlycon.c efi_earlycon_write_char(dst + x*4, *s, h); h 354 drivers/firmware/efi/libstub/efi-stub-helper.c efi_file_handle_t *h, *fh = __fh; h 360 drivers/firmware/efi/libstub/efi-stub-helper.c status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16, h 369 drivers/firmware/efi/libstub/efi-stub-helper.c *handle = h; h 372 drivers/firmware/efi/libstub/efi-stub-helper.c status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, h 387 drivers/firmware/efi/libstub/efi-stub-helper.c status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, h 112 drivers/firmware/efi/libstub/gop.c efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; h 115 drivers/firmware/efi/libstub/gop.c status = efi_call_early(handle_protocol, h, h 120 drivers/firmware/efi/libstub/gop.c status = efi_call_early(handle_protocol, h, h 209 drivers/firmware/efi/libstub/gop.c efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; h 212 drivers/firmware/efi/libstub/gop.c status = efi_call_early(handle_protocol, h, h 217 drivers/firmware/efi/libstub/gop.c status = efi_call_early(handle_protocol, h, h 29 drivers/firmware/qcom_scm-64.c #define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) 
(\ h 37 drivers/firmware/qcom_scm-64.c (((h) & 0x3) << 18) | \ h 139 drivers/firmware/ti_sci.c #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle) h 245 drivers/gpio/gpio-em.c static int em_gio_irq_domain_map(struct irq_domain *h, unsigned int irq, h 248 drivers/gpio/gpio-em.c struct em_gio_priv *p = h->host_data; h 252 drivers/gpio/gpio-em.c irq_set_chip_data(irq, h->host_data); h 271 drivers/gpio/gpio-mpc8xxx.c static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq, h 274 drivers/gpio/gpio-mpc8xxx.c irq_set_chip_data(irq, h->host_data); h 92 drivers/gpio/gpio-sodaville.c static int sdv_xlate(struct irq_domain *h, struct device_node *node, h 98 drivers/gpio/gpio-sodaville.c if (node != irq_domain_get_of_node(h)) h 422 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c int h; h 446 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c common_modes[i].h > 768) h 451 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c common_modes[i].h > native_mode->vdisplay || h 453 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c common_modes[i].h == native_mode->vdisplay)) h 456 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c if (common_modes[i].w < 320 || common_modes[i].h < 200) h 459 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); h 29 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h)) h 30 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) h 36 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) h 212 drivers/gpu/drm/amd/amdgpu/cikd.h #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) h 213 drivers/gpu/drm/amd/amdgpu/cikd.h #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) h 214 drivers/gpu/drm/amd/amdgpu/cikd.h #define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF) h 215 drivers/gpu/drm/amd/amdgpu/cikd.h #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) h 284 drivers/gpu/drm/amd/amdgpu/dce_virtual.c int h; h 306 drivers/gpu/drm/amd/amdgpu/dce_virtual.c mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); h 343 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c #define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \ h 348 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \ h 35 drivers/gpu/drm/amd/amdgpu/nvd.h #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) h 36 drivers/gpu/drm/amd/amdgpu/nvd.h #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) h 37 drivers/gpu/drm/amd/amdgpu/nvd.h #define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF) h 38 drivers/gpu/drm/amd/amdgpu/nvd.h #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) h 3358 drivers/gpu/drm/amd/amdgpu/si_dpm.c static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) h 3368 drivers/gpu/drm/amd/amdgpu/si_dpm.c a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); h 37 drivers/gpu/drm/amd/amdgpu/soc15d.h #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) h 38 drivers/gpu/drm/amd/amdgpu/soc15d.h #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) h 39 drivers/gpu/drm/amd/amdgpu/soc15d.h #define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF) h 40 drivers/gpu/drm/amd/amdgpu/soc15d.h #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) h 94 
drivers/gpu/drm/amd/amdgpu/vid.h #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) h 95 drivers/gpu/drm/amd/amdgpu/vid.h #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) h 96 drivers/gpu/drm/amd/amdgpu/vid.h #define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF) h 97 drivers/gpu/drm/amd/amdgpu/vid.h #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) h 4934 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c int h; h 4956 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c common_modes[i].h > native_mode->vdisplay || h 4958 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c common_modes[i].h == native_mode->vdisplay)) h 4963 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c common_modes[i].h == curmode->vdisplay) { h 4974 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c common_modes[i].h); h 35 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c const struct command_table_helper **h, h 42 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c *h = dal_cmd_tbl_helper_dce80_get_table(); h 46 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c *h = dal_cmd_tbl_helper_dce110_get_table(); h 50 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c *h = dal_cmd_tbl_helper_dce110_get_table(); h 55 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c *h = dal_cmd_tbl_helper_dce112_get_table(); h 170 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c const struct command_table_helper *h, h 197 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c (uint8_t)(h->transmitter_bp_to_atom(control->transmitter)); h 200 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c ctrl_param->ucAction = h->encoder_action_to_atom(control->action); h 203 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c (uint8_t)(h->encoder_mode_bp_to_atom( h 34 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h, h 46 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h const struct command_table_helper *h, h 36 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c const struct command_table_helper **h, h 43 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c *h = dal_cmd_tbl_helper_dce80_get_table(); h 47 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c *h = dal_cmd_tbl_helper_dce110_get_table(); h 51 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c *h = dal_cmd_tbl_helper_dce110_get_table(); h 56 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c *h = dal_cmd_tbl_helper_dce112_get_table2(); h 61 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c *h = dal_cmd_tbl_helper_dce112_get_table2(); h 67 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c *h = dal_cmd_tbl_helper_dce112_get_table2(); h 72 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c *h = dal_cmd_tbl_helper_dce112_get_table2(); h 77 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c *h = dal_cmd_tbl_helper_dce112_get_table2(); h 34 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h bool dal_bios_parser_init_cmd_tbl_helper2(const struct command_table_helper **h, h 43 drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h const struct command_table_helper *h, h 896 drivers/gpu/drm/amd/display/dc/core/dc_resource.c data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int( h 918 drivers/gpu/drm/amd/display/dc/core/dc_resource.c orthogonal_rotation ? 
&data->inits.v : &data->inits.h, h 936 drivers/gpu/drm/amd/display/dc/core/dc_resource.c orthogonal_rotation ? &data->inits.h : &data->inits.v, h 600 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c init_frac = dc_fixpt_u0d19(data->inits.h) << 5; h 601 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c init_int = dc_fixpt_floor(data->inits.h); h 162 drivers/gpu/drm/amd/display/dc/inc/hw/transform.h struct fixed31_32 h; h 47 drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h #define HV_SIZE(h, v) (((h) & 0x1FFF) + (((v) & 0x1FFF) << 16)) h 48 drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h #define HV_OFFSET(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16)) h 49 drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h #define HV_CROP(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16)) h 793 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c u32 h = dflow->in_h; h 804 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c swap(w, h); h 806 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h); h 811 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c dflow->out_h >= 2 * h; h 94 drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c int w = mode->hdisplay, h = mode->vdisplay; h 99 drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c if ((h < mode_config->min_height) || (h > mode_config->max_height)) h 409 drivers/gpu/drm/arm/malidp_hw.c u16 h, u32 fmt, bool has_modifier) h 443 drivers/gpu/drm/arm/malidp_hw.c u8 h = (u8)se_config->hcoeff - 1; h 446 drivers/gpu/drm/arm/malidp_hw.c if (WARN_ON(h >= ARRAY_SIZE(dp500_se_scaling_coeffs) || h 450 drivers/gpu/drm/arm/malidp_hw.c if ((h == v) && (se_config->hcoeff != old_config->hcoeff || h 464 drivers/gpu/drm/arm/malidp_hw.c 0, h); h 505 drivers/gpu/drm/arm/malidp_hw.c int num_planes, u16 w, u16 h, u32 fmt_id, h 536 drivers/gpu/drm/arm/malidp_hw.c malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h), h 751 drivers/gpu/drm/arm/malidp_hw.c u16 h, u32 fmt, bool has_modifier) h 778 drivers/gpu/drm/arm/malidp_hw.c u16 h, u32 fmt, bool has_modifier) h 846 drivers/gpu/drm/arm/malidp_hw.c int num_planes, u16 w, u16 h, u32 fmt_id, h 873 drivers/gpu/drm/arm/malidp_hw.c malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h), h 183 drivers/gpu/drm/arm/malidp_hw.h int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, h 204 drivers/gpu/drm/arm/malidp_hw.h s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id, h 48 drivers/gpu/drm/arm/malidp_mw.c int w = mode->hdisplay, h = mode->vdisplay; h 53 drivers/gpu/drm/arm/malidp_mw.c if ((h < mode_config->min_height) || (h > mode_config->max_height)) h 595 drivers/gpu/drm/armada/armada_crtc.c uint32_t yoff, yscr, h = dcrtc->cursor_h; h 618 drivers/gpu/drm/armada/armada_crtc.c h -= min(yoff, h); h 619 drivers/gpu/drm/armada/armada_crtc.c } else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) { h 622 drivers/gpu/drm/armada/armada_crtc.c h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0); h 633 drivers/gpu/drm/armada/armada_crtc.c h /= 2; h 636 drivers/gpu/drm/armada/armada_crtc.c if (!dcrtc->cursor_obj || !h || !w) { h 659 drivers/gpu/drm/armada/armada_crtc.c if (dcrtc->cursor_hw_sz != (h << 16 | w)) { h 672 drivers/gpu/drm/armada/armada_crtc.c armada_load_cursor_argb(dcrtc->base, pix, s, w, h); h 678 drivers/gpu/drm/armada/armada_crtc.c dcrtc->cursor_hw_sz = h << 16 | w; h 692 drivers/gpu/drm/armada/armada_crtc.c struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h) h 
702 drivers/gpu/drm/armada/armada_crtc.c if (handle && w > 0 && h > 0) { h 704 drivers/gpu/drm/armada/armada_crtc.c if (w > 64 || h > 64 || (w > 32 && h > 32)) h 717 drivers/gpu/drm/armada/armada_crtc.c if (obj->obj.size < w * h * 4) { h 731 drivers/gpu/drm/armada/armada_crtc.c dcrtc->cursor_h = h; h 94 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_SIZE(w, h) (((w) - 1) | (((h) - 1) << 16)) h 115 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_DISC_SIZE(w, h) (((w) - 1) | (((h) - 1) << 16)) h 718 drivers/gpu/drm/bridge/sil-sii8620.c static void sii8620_mhl_burst_hdr_set(struct mhl3_burst_header *h, h 721 drivers/gpu/drm/bridge/sil-sii8620.c h->id = cpu_to_be16(id); h 722 drivers/gpu/drm/bridge/sil-sii8620.c h->total_entries = 1; h 723 drivers/gpu/drm/bridge/sil-sii8620.c h->sequence_index = 1; h 1504 drivers/gpu/drm/bridge/tc358767.c bool h = val & INT_GPIO_H(tc->hpd_pin); h 1508 drivers/gpu/drm/bridge/tc358767.c h ? "H" : "", lc ? "LC" : ""); h 1510 drivers/gpu/drm/bridge/tc358767.c if (h || lc) h 645 drivers/gpu/drm/drm_edid.c short h; h 2246 drivers/gpu/drm/drm_edid.c int w, h; h 2262 drivers/gpu/drm/drm_edid.c (mode->vdisplay == cea_interlaced[i].h / 2)) { h 2524 drivers/gpu/drm/drm_edid.c newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0); h 2553 drivers/gpu/drm/drm_edid.c newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); h 2639 drivers/gpu/drm/drm_edid.c est3_modes[m].h, h 5381 drivers/gpu/drm/drm_edid.c u16 w, h; h 5387 drivers/gpu/drm/drm_edid.c h = tile->tile_size[2] | tile->tile_size[3] << 8; h 5403 drivers/gpu/drm/drm_edid.c connector->tile_v_size = h + 1; h 5406 drivers/gpu/drm/drm_edid.c DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1); h 418 drivers/gpu/drm/exynos/exynos5433_drm_decon.c COORDINATE_Y((state->crtc.y + state->crtc.h) / 2 - 1); h 425 drivers/gpu/drm/exynos/exynos5433_drm_decon.c COORDINATE_Y(state->crtc.y + state->crtc.h - 1); h 439 drivers/gpu/drm/exynos/exynos5433_drm_decon.c val = dma_addr + pitch * state->src.h; h 424 drivers/gpu/drm/exynos/exynos7_drm_decon.c state->crtc.w, state->crtc.h); h 433 drivers/gpu/drm/exynos/exynos7_drm_decon.c last_y = state->crtc.y + state->crtc.h; h 45 drivers/gpu/drm/exynos/exynos_drm_drv.h unsigned int w, h; h 479 drivers/gpu/drm/exynos/exynos_drm_fimc.c v2 = buf->buf.height - buf->rect.h - buf->rect.y; h 482 drivers/gpu/drm/exynos/exynos_drm_fimc.c buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h, h 520 drivers/gpu/drm/exynos/exynos_drm_fimc.c buf->rect.y, buf->rect.w, buf->rect.h); h 527 drivers/gpu/drm/exynos/exynos_drm_fimc.c EXYNOS_CIREAL_ISIZE_HEIGHT(buf->rect.h)); h 750 drivers/gpu/drm/exynos/exynos_drm_fimc.c src_w = src->h; h 754 drivers/gpu/drm/exynos/exynos_drm_fimc.c src_h = src->h; h 758 drivers/gpu/drm/exynos/exynos_drm_fimc.c dst_w = dst->h; h 762 drivers/gpu/drm/exynos/exynos_drm_fimc.c dst_h = dst->h; h 863 drivers/gpu/drm/exynos/exynos_drm_fimc.c buf->rect.w, buf->rect.h); h 883 drivers/gpu/drm/exynos/exynos_drm_fimc.c cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.h) | h 887 drivers/gpu/drm/exynos/exynos_drm_fimc.c EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.h)); h 891 drivers/gpu/drm/exynos/exynos_drm_fimc.c cfg = EXYNOS_CITAREA_TARGET_AREA(buf->rect.w * buf->rect.h); h 1231 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) }, h 1232 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SIZE_LIMIT(AREA, .h = { 16, 4224, 2 }, .v = { 16, 0, 2 }) }, h 1233 drivers/gpu/drm/exynos/exynos_drm_fimc.c { 
IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1920 }, .v = { 128, 0 }) }, h 1234 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, h 1239 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) }, h 1240 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SIZE_LIMIT(AREA, .h = { 16, 1920, 2 }, .v = { 16, 0, 2 }) }, h 1241 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1366 }, .v = { 128, 0 }) }, h 1242 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, h 1247 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) }, h 1248 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SIZE_LIMIT(AREA, .h = { 128, 1920, 2 }, .v = { 128, 0, 2 }) }, h 1249 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, h 1254 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) }, h 1255 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SIZE_LIMIT(AREA, .h = { 128, 1366, 2 }, .v = { 128, 0, 2 }) }, h 1256 drivers/gpu/drm/exynos/exynos_drm_fimc.c { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, h 814 drivers/gpu/drm/exynos/exynos_drm_fimd.c size = pitch * state->crtc.h; h 822 drivers/gpu/drm/exynos/exynos_drm_fimd.c state->crtc.w, state->crtc.h); h 843 drivers/gpu/drm/exynos/exynos_drm_fimd.c last_y = state->crtc.y + state->crtc.h; h 861 drivers/gpu/drm/exynos/exynos_drm_fimd.c val = state->crtc.w * state->crtc.h; h 577 drivers/gpu/drm/exynos/exynos_drm_gsc.c GSC_CROPPED_HEIGHT(buf->rect.h)); h 752 drivers/gpu/drm/exynos/exynos_drm_gsc.c src_h = src->h; h 755 drivers/gpu/drm/exynos/exynos_drm_gsc.c dst_w = dst->h; h 759 drivers/gpu/drm/exynos/exynos_drm_gsc.c dst_h = dst->h; h 877 drivers/gpu/drm/exynos/exynos_drm_gsc.c cfg = (GSC_SCALED_WIDTH(buf->rect.h) | h 881 drivers/gpu/drm/exynos/exynos_drm_gsc.c GSC_SCALED_HEIGHT(buf->rect.h)); h 1362 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) }, h 1363 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) }, h 1364 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2048 }, .v = { 16, 2048 }) }, h 1365 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, h 1370 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) }, h 1371 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) }, h 1372 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SIZE_LIMIT(ROTATED, .h = { 16, 2016 }, .v = { 8, 2016 }) }, h 1373 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, h 1378 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) }, h 1379 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) }, h 1380 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) }, h 1381 drivers/gpu/drm/exynos/exynos_drm_gsc.c { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, h 270 drivers/gpu/drm/exynos/exynos_drm_ipp.c task->src.rect.h = task->dst.rect.h = UINT_MAX; h 406 
drivers/gpu/drm/exynos/exynos_drm_ipp.c struct drm_exynos_ipp_limit_val h; h 444 drivers/gpu/drm/exynos/exynos_drm_ipp.c __limit_set_val(&res->h.min, l->h.min); h 445 drivers/gpu/drm/exynos/exynos_drm_ipp.c __limit_set_val(&res->h.max, l->h.max); h 446 drivers/gpu/drm/exynos/exynos_drm_ipp.c __limit_set_val(&res->h.align, l->h.align); h 480 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; h 487 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (!__size_limit_check(real_width, &l.h) || h 492 drivers/gpu/drm/exynos/exynos_drm_ipp.c lv = &l.h; h 498 drivers/gpu/drm/exynos/exynos_drm_ipp.c !__size_limit_check(buf->rect.h, lv) || h 535 drivers/gpu/drm/exynos/exynos_drm_ipp.c lh = (!swap) ? &limits->h : &limits->v; h 536 drivers/gpu/drm/exynos/exynos_drm_ipp.c lv = (!swap) ? &limits->v : &limits->h; h 537 drivers/gpu/drm/exynos/exynos_drm_ipp.c dw = (!swap) ? dst->w : dst->h; h 538 drivers/gpu/drm/exynos/exynos_drm_ipp.c dh = (!swap) ? dst->h : dst->w; h 541 drivers/gpu/drm/exynos/exynos_drm_ipp.c !__scale_limit_check(src->h, dh, lv->min, lv->max)) h 615 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (src->rect.h == UINT_MAX) h 616 drivers/gpu/drm/exynos/exynos_drm_ipp.c src->rect.h = src->buf.height; h 619 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (dst->rect.h == UINT_MAX) h 620 drivers/gpu/drm/exynos/exynos_drm_ipp.c dst->rect.h = dst->buf.height; h 623 drivers/gpu/drm/exynos/exynos_drm_ipp.c src->rect.y + src->rect.h > (src->buf.height) || h 625 drivers/gpu/drm/exynos/exynos_drm_ipp.c dst->rect.y + dst->rect.h > (dst->buf.height)) { h 633 drivers/gpu/drm/exynos/exynos_drm_ipp.c src->rect.h != dst->rect.h)) || h 634 drivers/gpu/drm/exynos/exynos_drm_ipp.c (swap && (src->rect.w != dst->rect.h || h 635 drivers/gpu/drm/exynos/exynos_drm_ipp.c src->rect.h != dst->rect.w))) h 109 drivers/gpu/drm/exynos/exynos_drm_plane.c exynos_state->src.h = (actual_h * exynos_state->v_ratio) >> 16; h 115 drivers/gpu/drm/exynos/exynos_drm_plane.c exynos_state->crtc.h = actual_h; h 120 drivers/gpu/drm/exynos/exynos_drm_plane.c exynos_state->crtc.w, exynos_state->crtc.h); h 212 drivers/gpu/drm/exynos/exynos_drm_plane.c if (state->src.h == state->crtc.h) h 151 drivers/gpu/drm/exynos/exynos_drm_rotator.c val = ROT_SRC_CROP_SIZE_H(buf->rect.h) | h 360 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) }, h 361 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) }, h 365 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) }, h 366 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) }, h 370 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) }, h 371 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) }, h 375 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) }, h 376 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) }, h 380 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) }, h 381 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) }, h 385 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) }, h 386 
drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) }, h 390 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_32K }, .v = { 32, SZ_32K }) }, h 391 drivers/gpu/drm/exynos/exynos_drm_rotator.c { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) }, h 201 drivers/gpu/drm/exynos/exynos_drm_scaler.c val |= SCALER_SRC_WH_SET_HEIGHT(src_pos->h); h 248 drivers/gpu/drm/exynos/exynos_drm_scaler.c val |= SCALER_DST_WH_SET_HEIGHT(dst_pos->h); h 270 drivers/gpu/drm/exynos/exynos_drm_scaler.c h_ratio = (src_pos->h << 16) / dst_pos->w; h 271 drivers/gpu/drm/exynos/exynos_drm_scaler.c v_ratio = (src_pos->w << 16) / dst_pos->h; h 274 drivers/gpu/drm/exynos/exynos_drm_scaler.c v_ratio = (src_pos->h << 16) / dst_pos->h; h 597 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) }, h 598 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) }, h 599 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 }, h 604 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) }, h 605 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 1) }, h 606 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 }, h 611 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) }, h 612 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 }, h 617 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K })}, h 618 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SIZE_LIMIT(AREA, .h.align = 16, .v.align = 16) }, h 619 drivers/gpu/drm/exynos/exynos_drm_scaler.c { IPP_SCALE_LIMIT(.h = {1, 1}, .v = {1, 1})}, h 567 drivers/gpu/drm/exynos/exynos_mixer.c vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2); h 569 drivers/gpu/drm/exynos/exynos_mixer.c vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2); h 572 drivers/gpu/drm/exynos/exynos_mixer.c vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); h 574 drivers/gpu/drm/exynos/exynos_mixer.c vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h); h 664 drivers/gpu/drm/exynos/exynos_mixer.c val |= MXR_GRP_WH_HEIGHT(state->src.h); h 1046 drivers/gpu/drm/exynos/exynos_mixer.c u32 w = mode->hdisplay, h = mode->vdisplay; h 1049 drivers/gpu/drm/exynos/exynos_mixer.c w, h, mode->vrefresh, h 1055 drivers/gpu/drm/exynos/exynos_mixer.c if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) || h 1056 drivers/gpu/drm/exynos/exynos_mixer.c (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) || h 1057 drivers/gpu/drm/exynos/exynos_mixer.c (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080)) h 1060 drivers/gpu/drm/exynos/exynos_mixer.c if ((w == 1024 && h == 768) || h 1061 drivers/gpu/drm/exynos/exynos_mixer.c (w == 1366 && h == 768) || h 1062 drivers/gpu/drm/exynos/exynos_mixer.c (w == 1280 && h == 1024)) h 386 drivers/gpu/drm/i810/i810_dma.c dev_priv->h = init->h; h 629 drivers/gpu/drm/i810/i810_dma.c pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) h 687 drivers/gpu/drm/i810/i810_dma.c unsigned int h = pbox->y2 - pbox->y1; h 693 drivers/gpu/drm/i810/i810_dma.c pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) h 699 drivers/gpu/drm/i810/i810_dma.c OUT_RING((h << 16) | (w * cpp)); h 103 drivers/gpu/drm/i810/i810_drv.h int w, h; h 3396 
drivers/gpu/drm/i915/display/intel_display.c int h = drm_rect_height(&plane_state->base.src) >> 16; h 3408 drivers/gpu/drm/i915/display/intel_display.c if (w > max_width || h > max_height) { h 3410 drivers/gpu/drm/i915/display/intel_display.c w, h, max_width, max_height); h 3490 drivers/gpu/drm/i915/display/intel_display.c int h = drm_rect_height(&plane_state->base.src) >> 17; h 3497 drivers/gpu/drm/i915/display/intel_display.c if (w > max_width || h > max_height) { h 3499 drivers/gpu/drm/i915/display/intel_display.c w, h, max_width, max_height); h 134 drivers/gpu/drm/i915/display/intel_dsi.h static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h) h 136 drivers/gpu/drm/i915/display/intel_dsi.h return container_of(h, struct intel_dsi_host, base); h 1729 drivers/gpu/drm/i915/display/intel_tv.c u16 w, h; h 1782 drivers/gpu/drm/i915/display/intel_tv.c input->h > intel_tv_mode_vdisplay(tv_mode)) h 1802 drivers/gpu/drm/i915/display/intel_tv.c intel_tv_scale_mode_vert(mode, input->h, 0, 0); h 53 drivers/gpu/drm/i915/gt/selftest_hangcheck.c static int hang_init(struct hang *h, struct intel_gt *gt) h 58 drivers/gpu/drm/i915/gt/selftest_hangcheck.c memset(h, 0, sizeof(*h)); h 59 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->gt = gt; h 61 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->ctx = kernel_context(gt->i915); h 62 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (IS_ERR(h->ctx)) h 63 drivers/gpu/drm/i915/gt/selftest_hangcheck.c return PTR_ERR(h->ctx); h 65 drivers/gpu/drm/i915/gt/selftest_hangcheck.c GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx)); h 67 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); h 68 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (IS_ERR(h->hws)) { h 69 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = PTR_ERR(h->hws); h 73 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); h 74 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (IS_ERR(h->obj)) { h 75 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = PTR_ERR(h->obj); h 79 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC); h 80 drivers/gpu/drm/i915/gt/selftest_hangcheck.c vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB); h 85 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->seqno = memset(vaddr, 0xff, PAGE_SIZE); h 87 drivers/gpu/drm/i915/gt/selftest_hangcheck.c vaddr = i915_gem_object_pin_map(h->obj, h 93 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->batch = vaddr; h 98 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_unpin_map(h->hws); h 100 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_put(h->obj); h 102 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_put(h->hws); h 104 drivers/gpu/drm/i915/gt/selftest_hangcheck.c kernel_context_close(h->ctx); h 131 drivers/gpu/drm/i915/gt/selftest_hangcheck.c hang_create_request(struct hang *h, struct intel_engine_cs *engine) h 133 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct intel_gt *gt = h->gt; h 134 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm; h 153 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_unpin_map(h->obj); h 154 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_put(h->obj); h 156 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->obj = obj; h 157 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h->batch = vaddr; h 159 
drivers/gpu/drm/i915/gt/selftest_hangcheck.c vma = i915_vma_instance(h->obj, vm, NULL); h 163 drivers/gpu/drm/i915/gt/selftest_hangcheck.c hws = i915_vma_instance(h->hws, vm, NULL); h 175 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq = igt_request_alloc(h->ctx, engine); h 189 drivers/gpu/drm/i915/gt/selftest_hangcheck.c batch = h->batch; h 270 drivers/gpu/drm/i915/gt/selftest_hangcheck.c static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) h 272 drivers/gpu/drm/i915/gt/selftest_hangcheck.c return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]); h 275 drivers/gpu/drm/i915/gt/selftest_hangcheck.c static void hang_fini(struct hang *h) h 277 drivers/gpu/drm/i915/gt/selftest_hangcheck.c *h->batch = MI_BATCH_BUFFER_END; h 278 drivers/gpu/drm/i915/gt/selftest_hangcheck.c intel_gt_chipset_flush(h->gt); h 280 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_unpin_map(h->obj); h 281 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_put(h->obj); h 283 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_unpin_map(h->hws); h 284 drivers/gpu/drm/i915/gt/selftest_hangcheck.c i915_gem_object_put(h->hws); h 286 drivers/gpu/drm/i915/gt/selftest_hangcheck.c kernel_context_close(h->ctx); h 288 drivers/gpu/drm/i915/gt/selftest_hangcheck.c igt_flush_test(h->gt->i915, I915_WAIT_LOCKED); h 291 drivers/gpu/drm/i915/gt/selftest_hangcheck.c static bool wait_until_running(struct hang *h, struct i915_request *rq) h 293 drivers/gpu/drm/i915/gt/selftest_hangcheck.c return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq), h 296 drivers/gpu/drm/i915/gt/selftest_hangcheck.c wait_for(i915_seqno_passed(hws_seqno(h, rq), h 307 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct hang h; h 313 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = hang_init(&h, gt); h 324 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq = hang_create_request(&h, engine); h 334 drivers/gpu/drm/i915/gt/selftest_hangcheck.c *h.batch = MI_BATCH_BUFFER_END; h 357 drivers/gpu/drm/i915/gt/selftest_hangcheck.c hang_fini(&h); h 557 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct hang h; h 567 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = hang_init(&h, gt); h 597 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq = hang_create_request(&h, engine); h 608 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (!wait_until_running(&h, rq)) { h 612 drivers/gpu/drm/i915/gt/selftest_hangcheck.c __func__, rq->fence.seqno, hws_seqno(&h, rq)); h 660 drivers/gpu/drm/i915/gt/selftest_hangcheck.c hang_fini(&h); h 787 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct hang h; h 799 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = hang_init(&h, gt); h 805 drivers/gpu/drm/i915/gt/selftest_hangcheck.c h.ctx->sched.priority = 1024; h 859 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq = hang_create_request(&h, engine); h 870 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (!wait_until_running(&h, rq)) { h 874 drivers/gpu/drm/i915/gt/selftest_hangcheck.c __func__, rq->fence.seqno, hws_seqno(&h, rq)); h 992 drivers/gpu/drm/i915/gt/selftest_hangcheck.c hang_fini(&h); h 1053 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct hang h; h 1065 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = hang_init(&h, gt); h 1069 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq = hang_create_request(&h, engine); h 1078 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (!wait_until_running(&h, rq)) { h 1082 drivers/gpu/drm/i915/gt/selftest_hangcheck.c __func__, rq->fence.seqno, hws_seqno(&h, rq)); h 1110 
drivers/gpu/drm/i915/gt/selftest_hangcheck.c hang_fini(&h); h 1191 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct hang h; h 1200 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = hang_init(&h, gt); h 1224 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq = hang_create_request(&h, engine); h 1267 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (!wait_until_running(&h, rq)) { h 1271 drivers/gpu/drm/i915/gt/selftest_hangcheck.c __func__, rq->fence.seqno, hws_seqno(&h, rq)); h 1321 drivers/gpu/drm/i915/gt/selftest_hangcheck.c hang_fini(&h); h 1399 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct hang h; h 1407 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = hang_init(&h, gt); h 1419 drivers/gpu/drm/i915/gt/selftest_hangcheck.c prev = hang_create_request(&h, engine); h 1433 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq = hang_create_request(&h, engine); h 1464 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (!wait_until_running(&h, prev)) { h 1469 drivers/gpu/drm/i915/gt/selftest_hangcheck.c prev->fence.seqno, hws_seqno(&h, prev)); h 1516 drivers/gpu/drm/i915/gt/selftest_hangcheck.c *h.batch = MI_BATCH_BUFFER_END; h 1527 drivers/gpu/drm/i915/gt/selftest_hangcheck.c hang_fini(&h); h 1543 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct hang h; h 1558 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = hang_init(&h, gt); h 1562 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq = hang_create_request(&h, engine); h 1571 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (!wait_until_running(&h, rq)) { h 1575 drivers/gpu/drm/i915/gt/selftest_hangcheck.c __func__, rq->fence.seqno, hws_seqno(&h, rq)); h 1604 drivers/gpu/drm/i915/gt/selftest_hangcheck.c hang_fini(&h); h 1639 drivers/gpu/drm/i915/gt/selftest_hangcheck.c struct hang h; h 1646 drivers/gpu/drm/i915/gt/selftest_hangcheck.c err = hang_init(&h, engine->gt); h 1650 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq = hang_create_request(&h, engine); h 1659 drivers/gpu/drm/i915/gt/selftest_hangcheck.c if (wait_until_running(&h, rq)) { h 1664 drivers/gpu/drm/i915/gt/selftest_hangcheck.c rq->fence.seqno, hws_seqno(&h, rq)); h 1680 drivers/gpu/drm/i915/gt/selftest_hangcheck.c hang_fini(&h); h 82 drivers/gpu/drm/i915/gvt/firmware.c struct gvt_firmware_header *h; h 88 drivers/gpu/drm/i915/gvt/firmware.c size = sizeof(*h) + info->mmio_size + info->cfg_space_size; h 93 drivers/gpu/drm/i915/gvt/firmware.c h = firmware; h 95 drivers/gpu/drm/i915/gvt/firmware.c h->magic = VGT_MAGIC; h 96 drivers/gpu/drm/i915/gvt/firmware.c h->version = FIRMWARE_VERSION; h 97 drivers/gpu/drm/i915/gvt/firmware.c h->cfg_space_size = info->cfg_space_size; h 98 drivers/gpu/drm/i915/gvt/firmware.c h->cfg_space_offset = offsetof(struct gvt_firmware_header, data); h 99 drivers/gpu/drm/i915/gvt/firmware.c h->mmio_size = info->mmio_size; h 100 drivers/gpu/drm/i915/gvt/firmware.c h->mmio_offset = h->cfg_space_offset + h->cfg_space_size; h 102 drivers/gpu/drm/i915/gvt/firmware.c p = firmware + h->cfg_space_offset; h 104 drivers/gpu/drm/i915/gvt/firmware.c for (i = 0; i < h->cfg_space_size; i += 4) h 109 drivers/gpu/drm/i915/gvt/firmware.c p = firmware + h->mmio_offset; h 117 drivers/gpu/drm/i915/gvt/firmware.c h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start); h 158 drivers/gpu/drm/i915/gvt/firmware.c struct gvt_firmware_header *h; h 164 drivers/gpu/drm/i915/gvt/firmware.c h = (struct gvt_firmware_header *)fw->data; h 175 drivers/gpu/drm/i915/gvt/firmware.c VERIFY("magic number", h->magic, VGT_MAGIC); h 176 drivers/gpu/drm/i915/gvt/firmware.c VERIFY("version", 
h->version, FIRMWARE_VERSION); h 177 drivers/gpu/drm/i915/gvt/firmware.c VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start)); h 178 drivers/gpu/drm/i915/gvt/firmware.c VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size); h 179 drivers/gpu/drm/i915/gvt/firmware.c VERIFY("mmio size", h->mmio_size, info->mmio_size); h 181 drivers/gpu/drm/i915/gvt/firmware.c mem = (fw->data + h->cfg_space_offset); h 214 drivers/gpu/drm/i915/gvt/firmware.c struct gvt_firmware_header *h; h 261 drivers/gpu/drm/i915/gvt/firmware.c h = (struct gvt_firmware_header *)fw->data; h 263 drivers/gpu/drm/i915/gvt/firmware.c memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset, h 264 drivers/gpu/drm/i915/gvt/firmware.c h->cfg_space_size); h 265 drivers/gpu/drm/i915/gvt/firmware.c memcpy(firmware->mmio, fw->data + h->mmio_offset, h 266 drivers/gpu/drm/i915/gvt/firmware.c h->mmio_size); h 9302 drivers/gpu/drm/i915/i915_reg.h #define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4)) h 95 drivers/gpu/drm/i915/selftests/lib_sw_fence.c struct heap_fence *h = container_of(fence, typeof(*h), fence); h 102 drivers/gpu/drm/i915/selftests/lib_sw_fence.c heap_fence_put(&h->fence); h 110 drivers/gpu/drm/i915/selftests/lib_sw_fence.c struct heap_fence *h; h 112 drivers/gpu/drm/i915/selftests/lib_sw_fence.c h = kmalloc(sizeof(*h), gfp); h 113 drivers/gpu/drm/i915/selftests/lib_sw_fence.c if (!h) h 116 drivers/gpu/drm/i915/selftests/lib_sw_fence.c i915_sw_fence_init(&h->fence, heap_fence_notify); h 117 drivers/gpu/drm/i915/selftests/lib_sw_fence.c refcount_set(&h->ref.refcount, 2); h 119 drivers/gpu/drm/i915/selftests/lib_sw_fence.c return &h->fence; h 124 drivers/gpu/drm/i915/selftests/lib_sw_fence.c struct heap_fence *h = container_of(ref, typeof(*h), ref); h 126 drivers/gpu/drm/i915/selftests/lib_sw_fence.c i915_sw_fence_fini(&h->fence); h 128 drivers/gpu/drm/i915/selftests/lib_sw_fence.c kfree_rcu(h, rcu); h 133 drivers/gpu/drm/i915/selftests/lib_sw_fence.c struct heap_fence *h = container_of(fence, typeof(*h), fence); h 135 drivers/gpu/drm/i915/selftests/lib_sw_fence.c kref_put(&h->ref, heap_fence_release); h 62 drivers/gpu/drm/mcde/mcde_dsi.c static inline struct mcde_dsi *host_to_mcde_dsi(struct mipi_dsi_host *h) h 64 drivers/gpu/drm/mcde/mcde_dsi.c return container_of(h, struct mcde_dsi, dsi_host); h 47 drivers/gpu/drm/mediatek/mtk_disp_color.c unsigned int h, unsigned int vrefresh, h 53 drivers/gpu/drm/mediatek/mtk_disp_color.c writel(h, comp->regs + DISP_COLOR_HEIGHT(color)); h 116 drivers/gpu/drm/mediatek/mtk_disp_ovl.c unsigned int h, unsigned int vrefresh, h 119 drivers/gpu/drm/mediatek/mtk_disp_ovl.c if (w != 0 && h != 0) h 120 drivers/gpu/drm/mediatek/mtk_disp_ovl.c writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE); h 91 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c unsigned int h, unsigned int vrefresh, h 94 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c writel(w << 16 | h, comp->regs + DISP_OD_SIZE); h 110 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c unsigned int h, unsigned int vrefresh, h 113 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c writel(h << 16 | w, comp->regs + DISP_AAL_SIZE); h 127 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c unsigned int h, unsigned int vrefresh, h 130 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE); h 68 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h unsigned int h, unsigned int vrefresh, unsigned int bpc); h 92 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h unsigned int w, unsigned int h, h 96 
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h comp->funcs->config(comp, w, h, vrefresh, bpc); h 191 drivers/gpu/drm/mediatek/mtk_dsi.c static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h) h 193 drivers/gpu/drm/mediatek/mtk_dsi.c return container_of(h, struct mtk_dsi, host); h 29 drivers/gpu/drm/meson/meson_plane.c #define SCI_WH_M1_H(h) FIELD_PREP(GENMASK(12, 0), h) h 356 drivers/gpu/drm/mga/mga_drv.h #define SET_AGE(age, h, w) \ h 358 drivers/gpu/drm/mga/mga_drv.h (age)->head = h; \ h 362 drivers/gpu/drm/mga/mga_drv.h #define TEST_AGE(age, h, w) ((age)->wrap < w || \ h 364 drivers/gpu/drm/mga/mga_drv.h (age)->head < h)) h 797 drivers/gpu/drm/mga/mga_state.c int h = pbox[i].y2 - pbox[i].y1; h 809 drivers/gpu/drm/mga/mga_state.c MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h); h 734 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c const uint32_t h, h 743 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) { h 749 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout); h 751 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches); h 15 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c #define RESERVED_BY_OTHER(h, r) \ h 16 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c ((h)->enc_id && (h)->enc_id != r) h 340 drivers/gpu/drm/msm/msm_drv.h int w, int h, int p, uint32_t format); h 207 drivers/gpu/drm/msm/msm_fb.c msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format) h 212 drivers/gpu/drm/msm/msm_fb.c .height = h, h 39 drivers/gpu/drm/nouveau/dispnv50/atom.h } h; h 76 drivers/gpu/drm/nouveau/dispnv50/atom.h u16 h; h 93 drivers/gpu/drm/nouveau/dispnv50/atom.h u16 h; h 205 drivers/gpu/drm/nouveau/dispnv50/atom.h u16 h; h 79 drivers/gpu/drm/nouveau/dispnv50/base507c.c evo_data(push, asyw->image.h << 16 | asyw->image.w); h 210 drivers/gpu/drm/nouveau/dispnv50/base507c.c asyh->base.h = asyw->state.fb->height; h 46 drivers/gpu/drm/nouveau/dispnv50/base827c.c evo_data(push, asyw->image.h << 16 | asyw->image.w); h 37 drivers/gpu/drm/nouveau/dispnv50/base907c.c evo_data(push, asyw->image.h << 16 | asyw->image.w); h 85 drivers/gpu/drm/nouveau/dispnv50/curs507a.c if (asyw->image.w != asyw->image.h) h 259 drivers/gpu/drm/nouveau/dispnv50/head.c m->h.active = mode->crtc_htotal; h 260 drivers/gpu/drm/nouveau/dispnv50/head.c m->h.synce = mode->crtc_hsync_end - mode->crtc_hsync_start - 1; h 261 drivers/gpu/drm/nouveau/dispnv50/head.c m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1; h 262 drivers/gpu/drm/nouveau/dispnv50/head.c m->h.blanks = m->h.blanke + mode->crtc_hdisplay; h 270 drivers/gpu/drm/nouveau/dispnv50/head.c blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active; h 179 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_data(push, asyh->core.h << 16 | asyh->core.w); h 208 drivers/gpu/drm/nouveau/dispnv50/head507d.c asyh->core.h = asyh->base.h; h 220 drivers/gpu/drm/nouveau/dispnv50/head507d.c asyh->core.h = asyh->state.mode.vdisplay; h 297 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_data(push, m->v.active << 16 | m->h.active ); h 298 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_data(push, m->v.synce << 16 | m->h.synce ); h 299 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_data(push, m->v.blanke << 16 | m->h.blanke ); h 300 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_data(push, m->v.blanks << 16 | m->h.blanks ); h 64 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_data(push, asyh->core.h << 16 | 
asyh->core.w); h 174 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_data(push, asyh->core.h << 16 | asyh->core.w); h 249 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_data(push, m->v.active << 16 | m->h.active ); h 250 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_data(push, m->v.synce << 16 | m->h.synce ); h 251 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_data(push, m->v.blanke << 16 | m->h.blanke ); h 252 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_data(push, m->v.blanks << 16 | m->h.blanks ); h 169 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_data(push, (m->v.active << 16) | m->h.active ); h 170 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_data(push, (m->v.synce << 16) | m->h.synce ); h 171 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_data(push, (m->v.blanke << 16) | m->h.blanke ); h 172 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_data(push, (m->v.blanks << 16) | m->h.blanks ); h 175 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_data(push, (m->v.active << 16) | m->h.active ); h 176 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_data(push, (m->v.synce << 16) | m->h.synce ); h 177 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_data(push, (m->v.blanke << 16) | m->h.blanke ); h 178 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_data(push, (m->v.blanks << 16) | m->h.blanks ); h 83 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c evo_data(push, asyw->image.h << 16 | asyw->image.w); h 41 drivers/gpu/drm/nouveau/dispnv50/ovly827e.c evo_data(push, asyw->image.h << 16 | asyw->image.w); h 39 drivers/gpu/drm/nouveau/dispnv50/ovly907e.c evo_data(push, asyw->image.h << 16 | asyw->image.w); h 245 drivers/gpu/drm/nouveau/dispnv50/wndw.c asyw->image.h = fb->base.height; h 128 drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c evo_data(push, asyw->image.h << 16 | asyw->image.w); h 42 drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c evo_data(push, asyw->image.h << 16 | asyw->image.w); h 47 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h #define HEAD_MSG(h,l,f,a...) do { \ h 48 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h struct nvkm_head *_h = (h); \ h 51 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h #define HEAD_WARN(h,f,a...) HEAD_MSG((h), warn, f, ##a) h 52 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h #define HEAD_DBG(h,f,a...) 
HEAD_MSG((h), debug, f, ##a) h 82 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h void (*audio_sym)(struct nvkm_ior *, int head, u16 h, u32 v); h 335 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c u64 h, v; h 338 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c h = head->asy.hblanke + head->asy.htotal - head->asy.hblanks - 7; h 339 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c h = h * linkKBps; h 340 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c do_div(h, khz); h 341 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c h = h - (3 * ior->dp.ef) - (12 / ior->dp.nr); h 349 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c ior->func->dp.audio_sym(ior, head->id, h, v); h 49 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c g94_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v) h 53 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, h); h 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c gf119_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v) h 41 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, h); h 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v) h 39 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c nvkm_mask(device, 0x616568 + hoff, 0x0000ffff, h); h 42 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h) h 44 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c if (!h) h 47 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->offset = nvbios_vpstate_offset(b); h 48 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c if (!h->offset) h 51 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->version = nvbios_rd08(b, h->offset); h 52 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c switch (h->version) { h 54 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->hlen = nvbios_rd08(b, h->offset + 0x1); h 55 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->elen = nvbios_rd08(b, h->offset + 0x2); h 56 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->slen = nvbios_rd08(b, h->offset + 0x3); h 57 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->scount = nvbios_rd08(b, h->offset + 0x4); h 58 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->ecount = nvbios_rd08(b, h->offset + 0x5); h 60 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->base_id = nvbios_rd08(b, h->offset + 0x0f); h 61 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c if (h->hlen > 0x10) h 62 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->boost_id = nvbios_rd08(b, h->offset + 0x10); h 64 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->boost_id = 0xff; h 65 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c if (h->hlen > 0x11) h 66 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->tdp_id = nvbios_rd08(b, h->offset + 0x11); h 68 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c h->tdp_id = 0xff; h 76 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c nvbios_vpstate_entry(struct nvkm_bios *b, struct nvbios_vpstate_header *h, h 81 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c if (!e || !h || idx > h->ecount) h 84 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c offset = h->offset + h->hlen + idx * (h->elen + (h->slen * h->scount)); h 658 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c struct nvbios_vpstate_header h; h 662 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c if (bios && !nvbios_vpstate_parse(bios, &h)) { h 664 
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost)) h 666 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base)) h 229 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c u16 x, u16 y, u16 w, u16 h) h 236 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c u16 y2 = y + h - 1; h 874 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c u16 x, u16 y, u16 w, u16 h) h 880 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); h 1007 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c u16 x, u16 y, u16 w, u16 h) h 1016 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c if (size < w * h * 3) h 1026 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c size = min((u32)w * h * 3, h 1043 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c dsicm_set_update_window(ddata, x, y, w, h); h 827 drivers/gpu/drm/omapdrm/dss/dispc.c u32 h, hv; h 829 drivers/gpu/drm/omapdrm/dss/dispc.c h = FLD_VAL(h_coef[i].hc0_vc00, 7, 0) h 839 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_ovl_write_firh_reg(dispc, plane, i, h); h 842 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_ovl_write_firh2_reg(dispc, plane, i, h); h 3822 drivers/gpu/drm/omapdrm/dss/dsi.c u16 h = dsi->vm.vactive; h 3824 drivers/gpu/drm/omapdrm/dss/dsi.c DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h); h 3830 drivers/gpu/drm/omapdrm/dss/dsi.c bytespf = bytespl * h; h 445 drivers/gpu/drm/omapdrm/dss/omapdss.h u16 x, u16 y, u16 w, u16 h); h 453 drivers/gpu/drm/omapdrm/dss/omapdss.h u16 x, u16 y, u16 w, u16 h); h 538 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c u16 h, u16 align) h 554 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c h = DIV_ROUND_UP(h, geom[fmt].slot_h); h 564 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes, h 696 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h) h 700 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c *h = round_up(*h, geom[fmt].slot_h); h 713 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h) h 715 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c tiler_align(fmt, &w, &h); h 716 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c return geom[fmt].cpp * w * h; h 719 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h) h 722 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h; h 95 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w, u16 h, h 105 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h); h 106 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h); h 107 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h); h 159 drivers/gpu/drm/omapdrm/omap_fb.c u32 h = state->src_h >> 16; h 177 drivers/gpu/drm/omapdrm/omap_fb.c y += h - 1; h 1337 drivers/gpu/drm/omapdrm/omap_gem.c u16 h = 1, w = PAGE_SIZE >> i; h 1339 drivers/gpu/drm/omapdrm/omap_gem.c tiler_align(fmts[i], &w, &h); h 1344 drivers/gpu/drm/omapdrm/omap_gem.c usergart[i].height = h; h 1345 drivers/gpu/drm/omapdrm/omap_gem.c usergart[i].height_shift = ilog2(h); h 1347 drivers/gpu/drm/omapdrm/omap_gem.c usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i); h 1353 drivers/gpu/drm/omapdrm/omap_gem.c block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE); h 1363 drivers/gpu/drm/omapdrm/omap_gem.c 
DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h, h 36 drivers/gpu/drm/omapdrm/tcm-sita.c static void free_slots(unsigned long pos, u16 w, u16 h, h 41 drivers/gpu/drm/omapdrm/tcm-sita.c for (i = 0; i < h; i++, pos += stride) h 87 drivers/gpu/drm/omapdrm/tcm-sita.c static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset, h 123 drivers/gpu/drm/omapdrm/tcm-sita.c if ((*pos + slot_stride * h) > num_bits) h 134 drivers/gpu/drm/omapdrm/tcm-sita.c for (i = 1; i < h; i++) { h 155 drivers/gpu/drm/omapdrm/tcm-sita.c for (i = 0, index = *pos; i < h; i++, index += slot_stride) h 181 drivers/gpu/drm/omapdrm/tcm-sita.c static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align, h 189 drivers/gpu/drm/omapdrm/tcm-sita.c ret = l2r_t2b(w, h, align, offset, &pos, slot_bytes, tcm->bitmap, h 196 drivers/gpu/drm/omapdrm/tcm-sita.c area->p1.y = area->p0.y + h - 1; h 211 drivers/gpu/drm/omapdrm/tcm-sita.c u16 w, h; h 216 drivers/gpu/drm/omapdrm/tcm-sita.c h = area->p1.y - area->p0.y + 1; h 219 drivers/gpu/drm/omapdrm/tcm-sita.c h = 1; h 223 drivers/gpu/drm/omapdrm/tcm-sita.c free_slots(pos, w, h, tcm->bitmap, tcm->width); h 247 drivers/gpu/drm/qxl/qxl_display.c int h = output->index; h 252 drivers/gpu/drm/qxl/qxl_display.c if (h >= qxl_num_crtc) h 256 drivers/gpu/drm/qxl/qxl_display.c if (h >= qdev->client_monitors_config->count) h 259 drivers/gpu/drm/qxl/qxl_display.c head = &qdev->client_monitors_config->heads[h]; h 260 drivers/gpu/drm/qxl/qxl_display.c DRM_DEBUG_KMS("head %d is %dx%d\n", h, head->width, head->height); h 267 drivers/gpu/drm/qxl/qxl_display.c int h; h 281 drivers/gpu/drm/qxl/qxl_display.c extra_modes[i].h, h 282 drivers/gpu/drm/r128/r128_state.c int x, int y, int w, int h, int r, int g, int b) h 323 drivers/gpu/drm/r128/r128_state.c OUT_RING((w << 16) | h); h 384 drivers/gpu/drm/r128/r128_state.c int h = pbox[i].y2 - y; h 415 drivers/gpu/drm/r128/r128_state.c OUT_RING((w << 16) | h); h 436 drivers/gpu/drm/r128/r128_state.c OUT_RING((w << 16) | h); h 457 drivers/gpu/drm/r128/r128_state.c OUT_RING((w << 16) | h); h 484 drivers/gpu/drm/r128/r128_state.c int h = pbox[i].y2 - y; h 511 drivers/gpu/drm/r128/r128_state.c OUT_RING((w << 16) | h); h 1678 drivers/gpu/drm/radeon/cikd.h #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) h 1679 drivers/gpu/drm/radeon/cikd.h #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) h 1680 drivers/gpu/drm/radeon/cikd.h #define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) h 1681 drivers/gpu/drm/radeon/cikd.h #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) h 874 drivers/gpu/drm/radeon/evergreen_cs.c unsigned w, h, d; h 877 drivers/gpu/drm/radeon/evergreen_cs.c h = r600_mip_minify(height, i); h 880 drivers/gpu/drm/radeon/evergreen_cs.c surf.nby = r600_fmt_get_nblocksy(surf.format, h); h 1419 drivers/gpu/drm/radeon/evergreend.h #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28) h 1420 drivers/gpu/drm/radeon/evergreend.h #define GET_DMA_COUNT(h) ((h) & 0x000fffff) h 1421 drivers/gpu/drm/radeon/evergreend.h #define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20) h 2094 drivers/gpu/drm/radeon/r100.c static int r100_track_compress_size(int compress_format, int w, int h) h 2116 drivers/gpu/drm/radeon/r100.c hblocks = (h + block_height - 1) / block_height; h 2127 drivers/gpu/drm/radeon/r100.c unsigned face, w, h; h 2135 drivers/gpu/drm/radeon/r100.c h = track->textures[idx].cube_info[face].height; h 2138 drivers/gpu/drm/radeon/r100.c size = r100_track_compress_size(compress_format, w, h); h 2140 drivers/gpu/drm/radeon/r100.c size = w * h; h 2160 
drivers/gpu/drm/radeon/r100.c unsigned u, i, w, h, d; h 2188 drivers/gpu/drm/radeon/r100.c h = track->textures[u].height; h 2190 drivers/gpu/drm/radeon/r100.c h |= track->textures[u].height_11; h 2191 drivers/gpu/drm/radeon/r100.c h = h / (1 << i); h 2193 drivers/gpu/drm/radeon/r100.c h = roundup_pow_of_two(h); h 2203 drivers/gpu/drm/radeon/r100.c size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; h 2206 drivers/gpu/drm/radeon/r100.c size += w * h * d; h 225 drivers/gpu/drm/radeon/r600_cs.c int r600_fmt_get_nblocksy(u32 format, u32 h) h 236 drivers/gpu/drm/radeon/r600_cs.c return (h + bh - 1) / bh; h 2365 drivers/gpu/drm/radeon/r600_cs.c #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28) h 2366 drivers/gpu/drm/radeon/r600_cs.c #define GET_DMA_COUNT(h) ((h) & 0x0000ffff) h 2367 drivers/gpu/drm/radeon/r600_cs.c #define GET_DMA_T(h) (((h) & 0x00800000) >> 23) h 221 drivers/gpu/drm/radeon/r600_dpm.c int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) h 231 drivers/gpu/drm/radeon/r600_dpm.c a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); h 142 drivers/gpu/drm/radeon/r600_dpm.h int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th); h 2755 drivers/gpu/drm/radeon/radeon.h #define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h)) h 2756 drivers/gpu/drm/radeon/radeon.h #define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h)) h 2894 drivers/gpu/drm/radeon/radeon.h int r600_fmt_get_nblocksy(u32 format, u32 h); h 509 drivers/gpu/drm/radeon/radeon_connectors.c int h; h 533 drivers/gpu/drm/radeon/radeon_connectors.c common_modes[i].h > 768) h 538 drivers/gpu/drm/radeon/radeon_connectors.c common_modes[i].h > native_mode->vdisplay || h 540 drivers/gpu/drm/radeon/radeon_connectors.c common_modes[i].h == native_mode->vdisplay)) h 543 drivers/gpu/drm/radeon/radeon_connectors.c if (common_modes[i].w < 320 || common_modes[i].h < 200) h 546 drivers/gpu/drm/radeon/radeon_connectors.c mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); h 3710 drivers/gpu/drm/radeon/radeon_reg.h #define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) h 3711 drivers/gpu/drm/radeon/radeon_reg.h #define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) h 3712 drivers/gpu/drm/radeon/radeon_reg.h #define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1) h 3713 drivers/gpu/drm/radeon/radeon_reg.h #define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) h 3714 drivers/gpu/drm/radeon/radeon_reg.h #define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2) h 3715 drivers/gpu/drm/radeon/radeon_reg.h #define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) h 1001 drivers/gpu/drm/radeon/rv6xx_dpm.c static void rv6xx_calculate_t(u32 l_f, u32 h_f, int h, h 1009 drivers/gpu/drm/radeon/rv6xx_dpm.c a_n = (int)h_f * d_l + (int)l_f * (h - d_r); h 442 drivers/gpu/drm/savage/savage_drv.h #define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF) h 833 drivers/gpu/drm/savage/savage_state.c unsigned int x, y, w, h; h 837 drivers/gpu/drm/savage/savage_state.c h = boxes[i].y2 - boxes[i].y1; h 859 drivers/gpu/drm/savage/savage_state.c DMA_WRITE(BCI_W_H(w, h)); h 40 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_DISSIZE(w, h) (((((h) - 1) & 0xffff) << 16) | \ h 44 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_LAYSIZE(w, h) (((((h) - 1) & 0x1fff) << 16) | \ h 78 drivers/gpu/drm/sun4i/sun4i_frontend.h #define SUN4I_FRONTEND_INSIZE(h, w) ((((h) - 1) << 16) | (((w) - 1))) h 81 
drivers/gpu/drm/sun4i/sun4i_frontend.h #define SUN4I_FRONTEND_OUTSIZE(h, w) ((((h) - 1) << 16) | (((w) - 1))) h 16 drivers/gpu/drm/sun4i/sun8i_mixer.h #define SUN8I_MIXER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1)) h 24 drivers/gpu/drm/sun4i/sun8i_ui_scaler.h #define SUN8I_UI_SCALER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1)) h 28 drivers/gpu/drm/sun4i/sun8i_vi_scaler.h #define SUN8I_VI_SCALER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1)) h 327 drivers/gpu/drm/tegra/dc.c if (window->src.h == window->dst.h) h 363 drivers/gpu/drm/tegra/dc.c value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w); h 369 drivers/gpu/drm/tegra/dc.c v_size = window->src.h; h 382 drivers/gpu/drm/tegra/dc.c v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp); h 408 drivers/gpu/drm/tegra/dc.c v_offset += window->src.h - 1; h 703 drivers/gpu/drm/tegra/dc.c window.src.h = drm_rect_height(&plane->state->src) >> 16; h 707 drivers/gpu/drm/tegra/dc.c window.dst.h = drm_rect_height(&plane->state->dst); h 129 drivers/gpu/drm/tegra/dc.h unsigned int h; h 135 drivers/gpu/drm/tegra/dc.h unsigned int h; h 1173 drivers/gpu/drm/ttm/ttm_page_alloc.c char *h[] = {"pool", "refills", "pages freed", "size"}; h 1179 drivers/gpu/drm/ttm/ttm_page_alloc.c h[0], h[1], h[2], h[3]); h 50 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c struct hgsmi_buffer_header *h; h 55 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c total_size = size + sizeof(*h) + sizeof(*t); h 56 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c h = gen_pool_dma_alloc(guest_pool, total_size, &offset); h 57 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c if (!h) h 60 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size); h 62 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE; h 63 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c h->data_size = size; h 64 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c h->channel = channel; h 65 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c h->channel_info = channel_info; h 66 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c memset(&h->u.header_data, 0, sizeof(h->u.header_data)); h 69 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c t->checksum = hgsmi_checksum(offset, h, t); h 71 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c return (u8 *)h + sizeof(*h); h 76 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c struct hgsmi_buffer_header *h = h 77 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h)); h 78 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c size_t total_size = h->data_size + sizeof(*h) + h 81 drivers/gpu/drm/vboxvideo/vbox_hgsmi.c gen_pool_free(guest_pool, (unsigned long)h, total_size); h 74 drivers/gpu/drm/vboxvideo/vbox_main.c cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1; h 60 drivers/gpu/drm/vboxvideo/vboxvideo.h u16 h; h 188 drivers/gpu/drm/vc4/vc4_txp.c int w = mode->hdisplay, h = mode->vdisplay; h 193 drivers/gpu/drm/vc4/vc4_txp.c if (h < mode_config->min_height || h > mode_config->max_height) h 44 drivers/gpu/drm/virtio/virtgpu_ioctl.c dst->h = cpu_to_le32(src->h); h 442 drivers/gpu/drm/virtio/virtgpu_ioctl.c box.w, box.h, box.x, box.y, NULL); h 69 drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h uint32 h; h 83 drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h uint32 h; h 98 drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h uint32 h; h 110 drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h uint32 h; h 449 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c u32 w, u32 h, h 490 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c for (j = 0; j < h; ++j) { h 1384 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h u32 w, u32 h, h 182 
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0; h 228 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c h = dst_y2 - dst_y1; h 230 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c h = max_t(s32, 0, h); h 236 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c if (w && h) { h 243 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c while (h-- > 0) { h 258 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c if (w && h) { h 193 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->w, box->h, box->d, box_count, h 217 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c for (i = 0; i < box->h; i++) h 2378 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c (rects[i].y + rects[i].h > INT_MAX)) { h 2387 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c drm_rects[i].y2 = curr_rect.y + curr_rect.h; h 2508 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c dirty->unit_y2 = dirty->fb_y + vclips_ptr->h + h 2635 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->h = clips->y2 - clips->y1; h 90 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c int w = 0, h = 0; h 94 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c h = max(h, crtc->y + crtc->mode.vdisplay); h 102 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], h 157 drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h; h 161 drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h; h 270 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c update->body.rect.h = bottom - top; h 468 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c blit->h = dirty->unit_y2 - dirty->unit_y1; h 765 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c blit->h = dirty->unit_y2 - dirty->unit_y1; h 1236 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->h = drm_rect_height(clip); h 1336 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->h = drm_rect_height(&diff.rect); h 1465 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->h = drm_rect_height(&clip); h 1509 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->h = drm_rect_height(clip); h 311 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c cb->h = cur_size->height; h 98 drivers/hid/hid-multitouch.c __s32 *x, *y, *cx, *cy, *p, *w, *h, *a; h 520 drivers/hid/hid-multitouch.c usage->h = DEFAULT_ZERO; h 812 drivers/hid/hid-multitouch.c MT_STORE_FIELD(h); h 1053 drivers/hid/hid-multitouch.c int wide = (*slot->w > *slot->h); h 1054 drivers/hid/hid-multitouch.c int major = max(*slot->w, *slot->h); h 1055 drivers/hid/hid-multitouch.c int minor = min(*slot->w, *slot->h); h 53 drivers/hid/hid-ntrig.c __u16 x, y, w, h; h 615 drivers/hid/hid-ntrig.c nd->h = value; h 677 drivers/hid/hid-ntrig.c nd->h < nd->min_height) h 687 drivers/hid/hid-ntrig.c nd->h >= nd->activation_height) { h 735 drivers/hid/hid-ntrig.c if (nd->w > nd->h) { h 741 drivers/hid/hid-ntrig.c ABS_MT_TOUCH_MINOR, nd->h); h 746 drivers/hid/hid-ntrig.c ABS_MT_TOUCH_MAJOR, nd->h); h 611 drivers/hid/hid-rmi.c static int rmi_irq_map(struct irq_domain *h, unsigned int virq, h 1369 drivers/hid/hid-sony.c int w, int h, int touch_major, int touch_minor, int orientation) h 1404 drivers/hid/hid-sony.c input_set_abs_params(sc->touchpad, ABS_MT_POSITION_Y, 0, h, 0, 0); h 1621 drivers/hid/hid-wiimote-core.c struct wiiproto_handler *h; h 1631 drivers/hid/hid-wiimote-core.c h = &handlers[i]; h 1632 drivers/hid/hid-wiimote-core.c if (h->id == raw_data[0] && h->size < size) { h 1633 drivers/hid/hid-wiimote-core.c h->func(wdata, &raw_data[1]); h 26 drivers/hid/intel-ish-hid/ishtp/dma-if.c dma_addr_t h; h 37 drivers/hid/intel-ish-hid/ishtp/dma-if.c &h, GFP_KERNEL); h 39 drivers/hid/intel-ish-hid/ishtp/dma-if.c dev->ishtp_host_dma_tx_buf_phys = h; h 52 drivers/hid/intel-ish-hid/ishtp/dma-if.c &h, GFP_KERNEL); h 55 
drivers/hid/intel-ish-hid/ishtp/dma-if.c dev->ishtp_host_dma_rx_buf_phys = h; h 68 drivers/hid/intel-ish-hid/ishtp/dma-if.c dma_addr_t h; h 71 drivers/hid/intel-ish-hid/ishtp/dma-if.c h = dev->ishtp_host_dma_tx_buf_phys; h 73 drivers/hid/intel-ish-hid/ishtp/dma-if.c dev->ishtp_host_dma_tx_buf, h); h 77 drivers/hid/intel-ish-hid/ishtp/dma-if.c h = dev->ishtp_host_dma_rx_buf_phys; h 79 drivers/hid/intel-ish-hid/ishtp/dma-if.c dev->ishtp_host_dma_rx_buf, h); h 1393 drivers/hid/wacom_wac.c int h = touch[7] * input_abs_get_res(touch_input, ABS_MT_POSITION_Y); h 1402 drivers/hid/wacom_wac.c input_report_abs(touch_input, ABS_MT_TOUCH_MAJOR, max(w, h)); h 1403 drivers/hid/wacom_wac.c input_report_abs(touch_input, ABS_MT_TOUCH_MINOR, min(w, h)); h 1404 drivers/hid/wacom_wac.c input_report_abs(touch_input, ABS_MT_ORIENTATION, w > h); h 1571 drivers/hid/wacom_wac.c int h = get_unaligned_le16(&data[offset + 12]); h 1573 drivers/hid/wacom_wac.c input_report_abs(input, ABS_MT_TOUCH_MAJOR, min(w,h)); h 1575 drivers/hid/wacom_wac.c min(w, h) + int_dist(t_x, t_y, c_x, c_y)); h 1576 drivers/hid/wacom_wac.c input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); h 1577 drivers/hid/wacom_wac.c input_report_abs(input, ABS_MT_ORIENTATION, w > h); h 72 drivers/hwmon/scmi-hwmon.c const struct scmi_handle *h = scmi_sensors->handle; h 75 drivers/hwmon/scmi-hwmon.c ret = h->sensor_ops->reading_get(h, sensor->id, &value); h 93 drivers/i2c/busses/i2c-ibm_iic.c # define DUMP_REGS(h,dev) dump_iic_regs((h),(dev)) h 95 drivers/i2c/busses/i2c-ibm_iic.c # define DUMP_REGS(h,dev) ((void)0) h 394 drivers/i2c/busses/i2c-stm32f7.c u16 p, l, a, h; h 520 drivers/i2c/busses/i2c-stm32f7.c for (h = 0; h < STM32F7_SCLH_MAX; h++) { h 521 drivers/i2c/busses/i2c-stm32f7.c u32 tscl_h = (h + 1) * prescaler + tsync; h 536 drivers/i2c/busses/i2c-stm32f7.c v->sclh = h; h 1259 drivers/i2c/i2c-core-base.c static int i2c_host_notify_irq_map(struct irq_domain *h, h 250 drivers/ide/ide.c unsigned int a, b, c = 0, h = 0, s = 0, i, j = 1; h 254 drivers/ide/ide.c if (sscanf(str, "%u.%u:%u,%u,%u", &a, &b, &c, &h, &s) != 5 && h 263 drivers/ide/ide.c if (c > INT_MAX || h > 255 || s > 255) h 272 drivers/ide/ide.c ide_disks_chs[i].head = h; h 42 drivers/iio/buffer/industrialio-triggered-buffer.c irqreturn_t (*h)(int irq, void *p), h 57 drivers/iio/buffer/industrialio-triggered-buffer.c indio_dev->pollfunc = iio_alloc_pollfunc(h, h 105 drivers/iio/buffer/industrialio-triggered-buffer.c irqreturn_t (*h)(int irq, void *p), h 119 drivers/iio/buffer/industrialio-triggered-buffer.c ret = iio_triggered_buffer_setup(indio_dev, h, thread, ops); h 39 drivers/iio/common/ssp_sensors/ssp_spi.c struct ssp_msg_header *h; h 77 drivers/iio/common/ssp_sensors/ssp_spi.c struct ssp_msg_header h; h 84 drivers/iio/common/ssp_sensors/ssp_spi.c h.cmd = cmd; h 85 drivers/iio/common/ssp_sensors/ssp_spi.c h.length = cpu_to_le16(len); h 86 drivers/iio/common/ssp_sensors/ssp_spi.c h.options = cpu_to_le16(opt); h 87 drivers/iio/common/ssp_sensors/ssp_spi.c h.data = cpu_to_le32(data); h 99 drivers/iio/common/ssp_sensors/ssp_spi.c memcpy(msg->buffer, &h, SSP_HEADER_SIZE); h 661 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c int i, h, result; h 664 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c h = (rate >> 1); h 666 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) h 261 drivers/iio/industrialio-trigger.c ret = request_threaded_irq(pf->irq, pf->h, pf->thread, h 324 drivers/iio/industrialio-trigger.c *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p), 
h 344 drivers/iio/industrialio-trigger.c pf->h = h; h 31 drivers/iio/industrialio-triggered-event.c irqreturn_t (*h)(int irq, void *p), h 34 drivers/iio/industrialio-triggered-event.c indio_dev->pollfunc_event = iio_alloc_pollfunc(h, h 418 drivers/infiniband/core/umem_odp.c struct hstate *h; h 427 drivers/infiniband/core/umem_odp.c h = hstate_vma(vma); h 428 drivers/infiniband/core/umem_odp.c umem_odp->page_shift = huge_page_shift(h); h 1786 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iwch_dev *h = to_iwch_dev(cm_id->device); h 1787 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iwch_qp *qp = get_qhp(h, conn_param->qpn); h 1877 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iwch_dev *h = to_iwch_dev(cm_id->device); h 1911 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->com.tdev = h->rdev.t3cdev_p; h 1915 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->com.qp = get_qhp(h, conn_param->qpn); h 1923 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep); h 1931 drivers/infiniband/hw/cxgb3/iwch_cm.c rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr, h 1960 drivers/infiniband/hw/cxgb3/iwch_cm.c l2t_release(h->rdev.t3cdev_p, ep->l2t); h 1975 drivers/infiniband/hw/cxgb3/iwch_cm.c struct iwch_dev *h = to_iwch_dev(cm_id->device); h 1993 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->com.tdev = h->rdev.t3cdev_p; h 2003 drivers/infiniband/hw/cxgb3/iwch_cm.c ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep); h 3122 drivers/infiniband/hw/cxgb4/cm.c struct c4iw_dev *h = to_c4iw_dev(cm_id->device); h 3123 drivers/infiniband/hw/cxgb4/cm.c struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); h 3161 drivers/infiniband/hw/cxgb4/cm.c ep->ord <= h->rdev.lldi.max_ordird_qp) { h 97 drivers/infiniband/hw/i40iw/i40iw_osdep.h #define i40iw_debug(h, m, s, ...) 
\ h 99 drivers/infiniband/hw/i40iw/i40iw_osdep.h if (((m) & (h)->debug_mask)) \ h 78 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c u32 h[OPA_VNIC_HDR_QW_LEN] = {0, 0xc0000000, 0, 0, 0}; h 80 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[2] = l4_type; h 81 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[3] = entropy; h 82 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[4] = l4_hdr << OPA_VNIC_L4_HDR_SHFT; h 85 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[0] |= (slid & OPA_16B_LID_MASK); h 86 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[2] |= ((slid >> (20 - OPA_16B_SLID_HIGH_SHFT)) & OPA_16B_SLID_MASK); h 88 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[1] |= (dlid & OPA_16B_LID_MASK); h 89 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[2] |= ((dlid >> (20 - OPA_16B_DLID_HIGH_SHFT)) & OPA_16B_DLID_MASK); h 91 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[0] |= (len << OPA_16B_LEN_SHFT); h 92 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[1] |= (rc << OPA_16B_RC_SHFT); h 93 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[1] |= (sc << OPA_16B_SC_SHFT); h 94 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c h[2] |= ((u32)pkey << OPA_16B_PKEY_SHFT); h 96 drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c memcpy(hdr, h, OPA_VNIC_HDR_LEN); h 1221 drivers/input/keyboard/applespi.c int x, y, w, h; h 1223 drivers/input/keyboard/applespi.c sts = sscanf(touchpad_dimensions, "%dx%d+%u+%u", &x, &y, &w, &h); h 1230 drivers/input/keyboard/applespi.c applespi->tp_info.y_max = y + h; h 66 drivers/input/misc/yealink.c #define _PIC(t, h, hm, n) \ h 68 drivers/input/misc/yealink.c .u = { .p = { .name = (n), .a = (h), .m = (hm) } } } h 1052 drivers/input/touchscreen/usbtouchscreen.c int x, y, begin_x, begin_y, end_x, end_y, w, h, ret; h 1098 drivers/input/touchscreen/usbtouchscreen.c h = end_y - begin_y; h 1102 drivers/input/touchscreen/usbtouchscreen.c ABS_MT_TOUCH_MAJOR, max(w,h)); h 1104 drivers/input/touchscreen/usbtouchscreen.c ABS_MT_TOUCH_MINOR, min(x,h)); h 1108 drivers/input/touchscreen/usbtouchscreen.c ABS_MT_POSITION_Y, 2*begin_y+h); h 1110 drivers/input/touchscreen/usbtouchscreen.c ABS_MT_ORIENTATION, w > h); h 1115 drivers/input/touchscreen/usbtouchscreen.c usbtouch->y = 2 * begin_y + h; h 453 drivers/iommu/amd_iommu_init.c static inline u32 get_ivhd_header_size(struct ivhd_header *h) h 457 drivers/iommu/amd_iommu_init.c switch (h->type) { h 498 drivers/iommu/amd_iommu_init.c static int __init find_last_devid_from_ivhd(struct ivhd_header *h) h 500 drivers/iommu/amd_iommu_init.c u8 *p = (void *)h, *end = (void *)h; h 503 drivers/iommu/amd_iommu_init.c u32 ivhd_size = get_ivhd_header_size(h); h 506 drivers/iommu/amd_iommu_init.c pr_err("Unsupported IVHD type %#x\n", h->type); h 511 drivers/iommu/amd_iommu_init.c end += h->length; h 562 drivers/iommu/amd_iommu_init.c struct ivhd_header *h; h 568 drivers/iommu/amd_iommu_init.c h = (struct ivhd_header *)p; h 569 drivers/iommu/amd_iommu_init.c if (h->type == amd_iommu_target_ivhd_type) { h 570 drivers/iommu/amd_iommu_init.c int ret = find_last_devid_from_ivhd(h); h 575 drivers/iommu/amd_iommu_init.c p += h->length; h 1141 drivers/iommu/amd_iommu_init.c struct ivhd_header *h) h 1143 drivers/iommu/amd_iommu_init.c u8 *p = (u8 *)h; h 1162 drivers/iommu/amd_iommu_init.c iommu->acpi_flags = h->flags; h 1167 drivers/iommu/amd_iommu_init.c ivhd_size = get_ivhd_header_size(h); h 1169 drivers/iommu/amd_iommu_init.c pr_err("Unsupported IVHD type %#x\n", h->type); h 1175 drivers/iommu/amd_iommu_init.c end += h->length; h 
1338 drivers/iommu/amd_iommu_init.c if (h->type != 0x40) { h 1490 drivers/iommu/amd_iommu_init.c static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) h 1511 drivers/iommu/amd_iommu_init.c iommu->devid = h->devid; h 1512 drivers/iommu/amd_iommu_init.c iommu->cap_ptr = h->cap_ptr; h 1513 drivers/iommu/amd_iommu_init.c iommu->pci_seg = h->pci_seg; h 1514 drivers/iommu/amd_iommu_init.c iommu->mmio_phys = h->mmio_phys; h 1516 drivers/iommu/amd_iommu_init.c switch (h->type) { h 1519 drivers/iommu/amd_iommu_init.c if ((h->efr_attr != 0) && h 1520 drivers/iommu/amd_iommu_init.c ((h->efr_attr & (0xF << 13)) != 0) && h 1521 drivers/iommu/amd_iommu_init.c ((h->efr_attr & (0x3F << 17)) != 0)) h 1525 drivers/iommu/amd_iommu_init.c if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) h 1530 drivers/iommu/amd_iommu_init.c if (h->efr_reg & (1 << 9)) h 1534 drivers/iommu/amd_iommu_init.c if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) h 1542 drivers/iommu/amd_iommu_init.c if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) && h 1543 drivers/iommu/amd_iommu_init.c (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT))) h 1573 drivers/iommu/amd_iommu_init.c ret = init_iommu_from_acpi(iommu, h); h 1623 drivers/iommu/amd_iommu_init.c struct ivhd_header *h; h 1631 drivers/iommu/amd_iommu_init.c h = (struct ivhd_header *)p; h 1636 drivers/iommu/amd_iommu_init.c PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), h 1637 drivers/iommu/amd_iommu_init.c PCI_FUNC(h->devid), h->cap_ptr, h 1638 drivers/iommu/amd_iommu_init.c h->pci_seg, h->flags, h->info); h 1640 drivers/iommu/amd_iommu_init.c h->mmio_phys); h 1646 drivers/iommu/amd_iommu_init.c ret = init_iommu_one(iommu, h); h 1650 drivers/iommu/amd_iommu_init.c p += h->length; h 754 drivers/iommu/dmar.c acpi_handle h; h 759 drivers/iommu/dmar.c &h))) { h 764 drivers/iommu/dmar.c if (acpi_bus_get_device(h, &adev)) { h 350 drivers/irqchip/irq-armada-370-xp.c static int armada_370_xp_mpic_irq_map(struct irq_domain *h, h 127 drivers/irqchip/irq-clps711x.c static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq, h 141 drivers/irqchip/irq-davinci-cp-intc.c static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq, h 322 drivers/irqchip/irq-gic-v3-its.c static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) h 324 drivers/irqchip/irq-gic-v3-its.c u64 mask = GENMASK_ULL(h, l); h 79 drivers/irqchip/irq-imx-irqsteer.c static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq, h 83 drivers/irqchip/irq-imx-irqsteer.c irq_set_chip_data(irq, h->host_data); h 124 drivers/irqchip/irq-keystone.c static int keystone_irq_map(struct irq_domain *h, unsigned int virq, h 127 drivers/irqchip/irq-keystone.c struct keystone_irq_device *kirq = h->host_data; h 63 drivers/irqchip/irq-renesas-h8300h.c static int irq_map(struct irq_domain *h, unsigned int virq, h 69 drivers/irqchip/irq-renesas-h8s.c static __init int irq_map(struct irq_domain *h, unsigned int virq, h 329 drivers/irqchip/irq-renesas-intc-irqpin.c static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, h 332 drivers/irqchip/irq-renesas-intc-irqpin.c struct intc_irqpin_priv *p = h->host_data; h 338 drivers/irqchip/irq-renesas-intc-irqpin.c irq_set_chip_data(virq, h->host_data); h 407 drivers/irqchip/irq-s3c24xx.c static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq, h 410 drivers/irqchip/irq-s3c24xx.c struct s3c_irq_intc *intc = h->host_data; h 1139 drivers/irqchip/irq-s3c24xx.c static int 
s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq, h 153 drivers/irqchip/irq-vt8500.c static int vt8500_irq_map(struct irq_domain *h, unsigned int virq, h 135 drivers/isdn/hardware/mISDN/isdnhdlc.c #define handle_fast_flag(h) \ h 137 drivers/isdn/hardware/mISDN/isdnhdlc.c if (h->cbin == fast_flag[h->bit_shift]) { \ h 138 drivers/isdn/hardware/mISDN/isdnhdlc.c h->ffvalue = fast_flag_value[h->bit_shift]; \ h 139 drivers/isdn/hardware/mISDN/isdnhdlc.c h->state = HDLC_FAST_FLAG; \ h 140 drivers/isdn/hardware/mISDN/isdnhdlc.c h->ffbit_shift = h->bit_shift; \ h 141 drivers/isdn/hardware/mISDN/isdnhdlc.c h->bit_shift = 1; \ h 143 drivers/isdn/hardware/mISDN/isdnhdlc.c h->state = HDLC_GET_DATA; \ h 144 drivers/isdn/hardware/mISDN/isdnhdlc.c h->data_received = 0; \ h 148 drivers/isdn/hardware/mISDN/isdnhdlc.c #define handle_abort(h) \ h 150 drivers/isdn/hardware/mISDN/isdnhdlc.c h->shift_reg = fast_abort[h->ffbit_shift - 1]; \ h 151 drivers/isdn/hardware/mISDN/isdnhdlc.c h->hdlc_bits1 = h->ffbit_shift - 2; \ h 152 drivers/isdn/hardware/mISDN/isdnhdlc.c if (h->hdlc_bits1 < 0) \ h 153 drivers/isdn/hardware/mISDN/isdnhdlc.c h->hdlc_bits1 = 0; \ h 154 drivers/isdn/hardware/mISDN/isdnhdlc.c h->data_bits = h->ffbit_shift - 1; \ h 155 drivers/isdn/hardware/mISDN/isdnhdlc.c h->state = HDLC_GET_DATA; \ h 156 drivers/isdn/hardware/mISDN/isdnhdlc.c h->data_received = 0; \ h 28 drivers/isdn/hardware/mISDN/mISDNipac.c #define ReadHSCX(h, o) (h->ip->read_reg(h->ip->hw, h->off + o)) h 29 drivers/isdn/hardware/mISDN/mISDNipac.c #define WriteHSCX(h, o, v) (h->ip->write_reg(h->ip->hw, h->off + o, v)) h 76 drivers/mailbox/bcm-pdc-mailbox.c #define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask)) h 77 drivers/mailbox/bcm-pdc-mailbox.c #define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask)) h 672 drivers/md/bcache/sysfs.c struct hlist_head *h; h 676 drivers/md/bcache/sysfs.c for (h = c->bucket_hash; h 677 drivers/md/bcache/sysfs.c h < c->bucket_hash + (1 << BUCKET_HASH_BITS); h 678 drivers/md/bcache/sysfs.c h++) { h 682 drivers/md/bcache/sysfs.c hlist_for_each(p, h) h 58 drivers/md/bcache/util.h #define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j]) h 60 drivers/md/bcache/util.h #define heap_sift(h, i, cmp) \ h 64 drivers/md/bcache/util.h for (; _j * 2 + 1 < (h)->used; _j = _r) { \ h 66 drivers/md/bcache/util.h if (_r + 1 < (h)->used && \ h 67 drivers/md/bcache/util.h cmp((h)->data[_r], (h)->data[_r + 1])) \ h 70 drivers/md/bcache/util.h if (cmp((h)->data[_r], (h)->data[_j])) \ h 72 drivers/md/bcache/util.h heap_swap(h, _r, _j); \ h 76 drivers/md/bcache/util.h #define heap_sift_down(h, i, cmp) \ h 80 drivers/md/bcache/util.h if (cmp((h)->data[i], (h)->data[p])) \ h 82 drivers/md/bcache/util.h heap_swap(h, i, p); \ h 87 drivers/md/bcache/util.h #define heap_add(h, d, cmp) \ h 89 drivers/md/bcache/util.h bool _r = !heap_full(h); \ h 91 drivers/md/bcache/util.h size_t _i = (h)->used++; \ h 92 drivers/md/bcache/util.h (h)->data[_i] = d; \ h 94 drivers/md/bcache/util.h heap_sift_down(h, _i, cmp); \ h 95 drivers/md/bcache/util.h heap_sift(h, _i, cmp); \ h 100 drivers/md/bcache/util.h #define heap_pop(h, d, cmp) \ h 102 drivers/md/bcache/util.h bool _r = (h)->used; \ h 104 drivers/md/bcache/util.h (d) = (h)->data[0]; \ h 105 drivers/md/bcache/util.h (h)->used--; \ h 106 drivers/md/bcache/util.h heap_swap(h, 0, (h)->used); \ h 107 drivers/md/bcache/util.h heap_sift(h, 0, cmp); \ h 112 drivers/md/bcache/util.h #define heap_peek(h) ((h)->used ? 
(h)->data[0] : NULL) h 114 drivers/md/bcache/util.h #define heap_full(h) ((h)->used == (h)->size) h 624 drivers/md/dm-cache-policy-smq.c unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits); h 625 drivers/md/dm-cache-policy-smq.c __h_insert(ht, h, e); h 628 drivers/md/dm-cache-policy-smq.c static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock, h 634 drivers/md/dm-cache-policy-smq.c for (e = h_head(ht, h); e; e = h_next(ht, e)) { h 644 drivers/md/dm-cache-policy-smq.c static void __h_unlink(struct smq_hash_table *ht, unsigned h, h 650 drivers/md/dm-cache-policy-smq.c ht->buckets[h] = e->hash_next; h 659 drivers/md/dm-cache-policy-smq.c unsigned h = hash_64(from_oblock(oblock), ht->hash_bits); h 661 drivers/md/dm-cache-policy-smq.c e = __h_lookup(ht, h, oblock, &prev); h 667 drivers/md/dm-cache-policy-smq.c __h_unlink(ht, h, e, prev); h 668 drivers/md/dm-cache-policy-smq.c __h_insert(ht, h, e); h 676 drivers/md/dm-cache-policy-smq.c unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits); h 683 drivers/md/dm-cache-policy-smq.c e = __h_lookup(ht, h, e->oblock, &prev); h 685 drivers/md/dm-cache-policy-smq.c __h_unlink(ht, h, e, prev); h 296 drivers/md/dm-cache-target.c static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, h 299 drivers/md/dm-cache-target.c h->bi_end_io = bio->bi_end_io; h 305 drivers/md/dm-cache-target.c static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) h 307 drivers/md/dm-cache-target.c bio->bi_end_io = h->bi_end_io; h 551 drivers/md/dm-clone-target.c struct hlist_node h; h 619 drivers/md/dm-clone-target.c hlist_for_each_entry(hd, &bucket->head, h) { h 635 drivers/md/dm-clone-target.c hlist_add_head(&hd->h, &bucket->head); h 691 drivers/md/dm-clone-target.c INIT_HLIST_NODE(&hd->h); h 718 drivers/md/dm-clone-target.c hlist_del(&hd->h); h 936 drivers/md/dm-clone-target.c hlist_del(&hd->h); h 101 drivers/md/dm-ioctl.c unsigned int h = 0; h 104 drivers/md/dm-ioctl.c h = (h + (unsigned int) *str++) * hash_mult; h 106 drivers/md/dm-ioctl.c return h & MASK_BUCKETS; h 115 drivers/md/dm-ioctl.c unsigned int h = hash_str(str); h 117 drivers/md/dm-ioctl.c list_for_each_entry (hc, _name_buckets + h, name_list) h 129 drivers/md/dm-ioctl.c unsigned int h = hash_str(str); h 131 drivers/md/dm-ioctl.c list_for_each_entry (hc, _uuid_buckets + h, uuid_list) h 239 drivers/md/dm-region-hash.c unsigned h; h 243 drivers/md/dm-region-hash.c for (h = 0; h < rh->nr_buckets; h++) { h 244 drivers/md/dm-region-hash.c list_for_each_entry_safe(reg, nreg, rh->buckets + h, h 906 drivers/md/dm-stats.c static int parse_histogram(const char *h, unsigned *n_histogram_entries, h 914 drivers/md/dm-stats.c for (q = h; *q; q++) h 930 drivers/md/dm-stats.c s = sscanf(h, "%llu%c", &hi, &ch); h 939 drivers/md/dm-stats.c h = strchr(h, ',') + 1; h 749 drivers/md/dm-thin.c struct dm_thin_endio_hook *h; h 754 drivers/md/dm-thin.c h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); h 755 drivers/md/dm-thin.c h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); h 864 drivers/md/dm-thin.c struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); h 865 drivers/md/dm-thin.c struct dm_thin_new_mapping *m = h->overwrite_mapping; h 1308 drivers/md/dm-thin.c struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); h 1310 drivers/md/dm-thin.c h->overwrite_mapping = m; h 1596 drivers/md/dm-thin.c struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); h 
1597 drivers/md/dm-thin.c struct thin_c *tc = h->tc; h 1745 drivers/md/dm-thin.c struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); h 1752 drivers/md/dm-thin.c h->cell = virt_cell; h 1831 drivers/md/dm-thin.c struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); h 1833 drivers/md/dm-thin.c h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); h 1884 drivers/md/dm-thin.c struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); h 1886 drivers/md/dm-thin.c h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); h 2705 drivers/md/dm-thin.c struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); h 2707 drivers/md/dm-thin.c h->tc = tc; h 2708 drivers/md/dm-thin.c h->shared_read_entry = NULL; h 2709 drivers/md/dm-thin.c h->all_io_entry = NULL; h 2710 drivers/md/dm-thin.c h->overwrite_mapping = NULL; h 2711 drivers/md/dm-thin.c h->cell = NULL; h 4346 drivers/md/dm-thin.c struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); h 4349 drivers/md/dm-thin.c struct pool *pool = h->tc->pool; h 4351 drivers/md/dm-thin.c if (h->shared_read_entry) { h 4353 drivers/md/dm-thin.c dm_deferred_entry_dec(h->shared_read_entry, &work); h 4363 drivers/md/dm-thin.c if (h->all_io_entry) { h 4365 drivers/md/dm-thin.c dm_deferred_entry_dec(h->all_io_entry, &work); h 4375 drivers/md/dm-thin.c if (h->cell) h 4376 drivers/md/dm-thin.c cell_defer_no_holder(h->tc, h->cell); h 76 drivers/md/persistent-data/dm-block-manager.c unsigned h = __find_holder(lock, NULL); h 82 drivers/md/persistent-data/dm-block-manager.c lock->holders[h] = task; h 85 drivers/md/persistent-data/dm-block-manager.c t = lock->traces + h; h 93 drivers/md/persistent-data/dm-block-manager.c unsigned h = __find_holder(lock, task); h 94 drivers/md/persistent-data/dm-block-manager.c lock->holders[h] = NULL; h 27 drivers/md/persistent-data/dm-btree-spine.c struct node_header *h = &n->header; h 29 drivers/md/persistent-data/dm-btree-spine.c h->blocknr = cpu_to_le64(dm_block_location(b)); h 30 drivers/md/persistent-data/dm-btree-spine.c h->csum = cpu_to_le32(dm_bm_checksum(&h->flags, h 42 drivers/md/persistent-data/dm-btree-spine.c struct node_header *h = &n->header; h 47 drivers/md/persistent-data/dm-btree-spine.c if (dm_block_location(b) != le64_to_cpu(h->blocknr)) { h 49 drivers/md/persistent-data/dm-btree-spine.c le64_to_cpu(h->blocknr), dm_block_location(b)); h 53 drivers/md/persistent-data/dm-btree-spine.c csum_disk = cpu_to_le32(dm_bm_checksum(&h->flags, h 56 drivers/md/persistent-data/dm-btree-spine.c if (csum_disk != h->csum) { h 58 drivers/md/persistent-data/dm-btree-spine.c le32_to_cpu(csum_disk), le32_to_cpu(h->csum)); h 62 drivers/md/persistent-data/dm-btree-spine.c value_size = le32_to_cpu(h->value_size); h 65 drivers/md/persistent-data/dm-btree-spine.c (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) { h 70 drivers/md/persistent-data/dm-btree-spine.c if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) { h 78 drivers/md/persistent-data/dm-btree-spine.c flags = le32_to_cpu(h->flags); h 51 drivers/md/persistent-data/dm-transaction-manager.c unsigned h = prefetch_hash(b); h 54 drivers/md/persistent-data/dm-transaction-manager.c if (p->blocks[h] == PREFETCH_SENTINEL) h 55 drivers/md/persistent-data/dm-transaction-manager.c p->blocks[h] = b; h 1053 drivers/media/common/cx2341x.c u16 h = new->height; h 1057 
drivers/media/common/cx2341x.c h /= 2; h 1060 drivers/media/common/cx2341x.c h, w); h 1744 drivers/media/common/cx2341x.c int h = cxhdl->height; h 1757 drivers/media/common/cx2341x.c h /= 2; h 1759 drivers/media/common/cx2341x.c err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_SIZE, 2, h, w); h 355 drivers/media/common/saa7146/saa7146_hlp.c int x[32], y[32], w[32], h[32]; h 369 drivers/media/common/saa7146/saa7146_hlp.c h[i] = vv->ov.clips[i].c.height; h 374 drivers/media/common/saa7146/saa7146_hlp.c if( h[i] < 0) { h 375 drivers/media/common/saa7146/saa7146_hlp.c y[i] += h[i]; h[i] = -h[i]; h 381 drivers/media/common/saa7146/saa7146_hlp.c h[i] += y[i]; y[i] = 0; h 384 drivers/media/common/saa7146/saa7146_hlp.c y[i] = height - y[i] - h[i]; h 390 drivers/media/common/saa7146/saa7146_hlp.c b = y[i]+h[i]; h 438 drivers/media/common/saa7146/saa7146_hlp.c if( line_list[i] < (y[j] + h[j]) ) { h 89 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h) h 93 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->src_height = tpg->buf_height = h; h 95 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->crop.height = tpg->compose.height = h; h 550 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c int *h, int *s, int *v) h 565 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c *h = 0; h 578 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c *h = 0; h 611 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c *h = aux; h 960 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c int h, s, v; h 962 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c color_to_hsv(tpg, r, g, b, &h, &s, &v); h 963 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->colors[k][0] = h; h 1675 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c unsigned h = tpg->src_height; h 1696 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->square.top = (h - sq_h) / 2; h 1700 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->border.height = h; h 1705 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (3 * w >= 4 * h) { h 1706 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->border.width = ((4 * h) / 3) & ~1; h 1713 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->border.top = (h - tpg->border.height) / 2; h 1718 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->border.top = (h - tpg->border.height) / 2; h 1721 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (9 * w >= 14 * h) { h 1722 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->border.width = ((14 * h) / 9) & ~1; h 1729 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->border.top = (h - tpg->border.height) / 2; h 1734 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->border.top = (h - tpg->border.height) / 2; h 1737 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (9 * w >= 16 * h) { h 1738 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->border.width = ((16 * h) / 9) & ~1; h 1745 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg->border.top = (h - tpg->border.height) / 2; h 2134 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c unsigned h = tpg->buf_height; h 2140 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c h /= tpg->vdownsampling[p]; h 2142 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c basep[p][1] += h * stride / 2; h 2144 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c basep[p][0] += h * stride / 2; h 2284 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c unsigned p, unsigned h, u8 *vbuf) h 2383 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c unsigned p, unsigned h, u8 *vbuf) h 2405 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (h >= params->hmax) { h 2535 
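The v4l2-tpg entries above fit a centered border rectangle to a target aspect ratio: when the frame is wider than the target (e.g. 3 * w >= 4 * h for 4:3), the width is derived from the height and masked with ~1 to stay even, then the rectangle is centered. The narrow-frame branch is not visible in the listing, so the symmetric handling below is an assumption:

    #include <stdio.h>

    struct rect { unsigned left, top, width, height; };

    /* Fit a num:den rectangle inside a w x h frame and center it. The
     * wide-frame branch mirrors the tpg logic above; the tall-frame branch
     * is an assumed symmetric counterpart. */
    static struct rect fit_aspect(unsigned w, unsigned h, unsigned num, unsigned den)
    {
        struct rect r;

        if (den * w >= num * h) {                 /* frame wider than num:den */
            r.width  = ((num * h) / den) & ~1u;   /* & ~1 keeps the size even */
            r.height = h;
        } else {                                  /* frame taller (assumed) */
            r.width  = w;
            r.height = ((den * w) / num) & ~1u;
        }
        r.left = (w - r.width) / 2;
        r.top  = (h - r.height) / 2;
        return r;
    }

    int main(void)
    {
        struct rect r = fit_aspect(1920, 1080, 4, 3);   /* 4:3 inside 16:9 */

        printf("%ux%u at (%u,%u)\n", r.width, r.height, r.left, r.top);
        return 0;                                       /* 1440x1080 at (240,0) */
    }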
drivers/media/common/v4l2-tpg/v4l2-tpg-core.c unsigned h; h 2551 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c for (h = 0; h < tpg->compose.height; h++) { h 2556 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c buf_line = tpg_calc_buffer_line(tpg, h, tpg->field); h 2584 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if ((h & 3) >= 2) h 2592 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (h & 1) h 2600 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg_fill_plane_pattern(tpg, ¶ms, p, h, h 2602 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c tpg_fill_plane_extras(tpg, ¶ms, p, h, h 91 drivers/media/common/videobuf2/videobuf2-memops.c struct vb2_vmarea_handler *h = vma->vm_private_data; h 94 drivers/media/common/videobuf2/videobuf2-memops.c __func__, h, refcount_read(h->refcount), vma->vm_start, h 97 drivers/media/common/videobuf2/videobuf2-memops.c refcount_inc(h->refcount); h 109 drivers/media/common/videobuf2/videobuf2-memops.c struct vb2_vmarea_handler *h = vma->vm_private_data; h 112 drivers/media/common/videobuf2/videobuf2-memops.c __func__, h, refcount_read(h->refcount), vma->vm_start, h 115 drivers/media/common/videobuf2/videobuf2-memops.c h->put(h->arg); h 315 drivers/media/dvb-core/dvb_net.c static int dvb_net_ule_new_ts_cell(struct dvb_net_ule_handle *h) h 322 drivers/media/dvb-core/dvb_net.c memcpy(ule_where, h->ts, TS_SZ); h 334 drivers/media/dvb-core/dvb_net.c if ((h->ts[0] != TS_SYNC) || (h->ts[1] & TS_TEI) || h 335 drivers/media/dvb-core/dvb_net.c ((h->ts[3] & TS_SC) != 0)) { h 337 drivers/media/dvb-core/dvb_net.c h->priv->ts_count, h->ts[0], h 338 drivers/media/dvb-core/dvb_net.c (h->ts[1] & TS_TEI) >> 7, h 339 drivers/media/dvb-core/dvb_net.c (h->ts[3] & TS_SC) >> 6); h 342 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_skb) { h 343 drivers/media/dvb-core/dvb_net.c dev_kfree_skb(h->priv->ule_skb); h 345 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_errors++; h 346 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_frame_errors++; h 348 drivers/media/dvb-core/dvb_net.c reset_ule(h->priv); h 349 drivers/media/dvb-core/dvb_net.c h->priv->need_pusi = 1; h 352 drivers/media/dvb-core/dvb_net.c h->ts += TS_SZ; h 353 drivers/media/dvb-core/dvb_net.c h->priv->ts_count++; h 357 drivers/media/dvb-core/dvb_net.c h->ts_remain = 184; h 358 drivers/media/dvb-core/dvb_net.c h->from_where = h->ts + 4; h 363 drivers/media/dvb-core/dvb_net.c static int dvb_net_ule_ts_pusi(struct dvb_net_ule_handle *h) h 365 drivers/media/dvb-core/dvb_net.c if (h->ts[1] & TS_PUSI) { h 368 drivers/media/dvb-core/dvb_net.c h->priv->tscc = h->ts[3] & 0x0F; h 370 drivers/media/dvb-core/dvb_net.c if (h->ts[4] > h->ts_remain) { h 372 drivers/media/dvb-core/dvb_net.c h->priv->ts_count, h->ts[4]); h 373 drivers/media/dvb-core/dvb_net.c h->ts += TS_SZ; h 374 drivers/media/dvb-core/dvb_net.c h->priv->ts_count++; h 378 drivers/media/dvb-core/dvb_net.c h->from_where = &h->ts[5] + h->ts[4]; h 379 drivers/media/dvb-core/dvb_net.c h->ts_remain -= 1 + h->ts[4]; h 380 drivers/media/dvb-core/dvb_net.c h->skipped = 0; h 382 drivers/media/dvb-core/dvb_net.c h->skipped++; h 383 drivers/media/dvb-core/dvb_net.c h->ts += TS_SZ; h 384 drivers/media/dvb-core/dvb_net.c h->priv->ts_count++; h 391 drivers/media/dvb-core/dvb_net.c static int dvb_net_ule_new_ts(struct dvb_net_ule_handle *h) h 394 drivers/media/dvb-core/dvb_net.c if ((h->ts[3] & 0x0F) == h->priv->tscc) h 395 drivers/media/dvb-core/dvb_net.c h->priv->tscc = (h->priv->tscc + 1) & 0x0F; h 399 drivers/media/dvb-core/dvb_net.c h->priv->ts_count, h->ts[3] & 0x0F, h 400 
drivers/media/dvb-core/dvb_net.c h->priv->tscc); h 402 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_skb) { h 403 drivers/media/dvb-core/dvb_net.c dev_kfree_skb(h->priv->ule_skb); h 406 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_errors++; h 407 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_frame_errors++; h 409 drivers/media/dvb-core/dvb_net.c reset_ule(h->priv); h 411 drivers/media/dvb-core/dvb_net.c h->priv->need_pusi = 1; h 420 drivers/media/dvb-core/dvb_net.c if (h->ts[1] & TS_PUSI) { h 421 drivers/media/dvb-core/dvb_net.c if (!h->priv->need_pusi) { h 422 drivers/media/dvb-core/dvb_net.c if (!(*h->from_where < (h->ts_remain-1)) || h 423 drivers/media/dvb-core/dvb_net.c *h->from_where != h->priv->ule_sndu_remain) { h 429 drivers/media/dvb-core/dvb_net.c h->priv->ts_count, h 430 drivers/media/dvb-core/dvb_net.c *h->from_where); h 436 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_skb) { h 437 drivers/media/dvb-core/dvb_net.c h->error = true; h 438 drivers/media/dvb-core/dvb_net.c dev_kfree_skb(h->priv->ule_skb); h 441 drivers/media/dvb-core/dvb_net.c if (h->error || h->priv->ule_sndu_remain) { h 442 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_errors++; h 443 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_frame_errors++; h 444 drivers/media/dvb-core/dvb_net.c h->error = false; h 447 drivers/media/dvb-core/dvb_net.c reset_ule(h->priv); h 448 drivers/media/dvb-core/dvb_net.c h->priv->need_pusi = 1; h 455 drivers/media/dvb-core/dvb_net.c h->from_where += 1; h 456 drivers/media/dvb-core/dvb_net.c h->ts_remain -= 1; h 458 drivers/media/dvb-core/dvb_net.c h->priv->need_pusi = 0; h 460 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_sndu_remain > 183) { h 465 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_errors++; h 466 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_length_errors++; h 468 drivers/media/dvb-core/dvb_net.c h->priv->ts_count, h 469 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_remain, h 470 drivers/media/dvb-core/dvb_net.c h->ts[4], h->ts_remain); h 471 drivers/media/dvb-core/dvb_net.c dev_kfree_skb(h->priv->ule_skb); h 473 drivers/media/dvb-core/dvb_net.c reset_ule(h->priv); h 478 drivers/media/dvb-core/dvb_net.c h->from_where += h->ts[4]; h 479 drivers/media/dvb-core/dvb_net.c h->ts_remain -= h->ts[4]; h 493 drivers/media/dvb-core/dvb_net.c static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h) h 495 drivers/media/dvb-core/dvb_net.c if (h->ts_remain < 2) { h 497 drivers/media/dvb-core/dvb_net.c h->ts_remain); h 498 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_len = 0; h 499 drivers/media/dvb-core/dvb_net.c h->priv->need_pusi = 1; h 500 drivers/media/dvb-core/dvb_net.c h->ts += TS_SZ; h 504 drivers/media/dvb-core/dvb_net.c if (!h->priv->ule_sndu_len) { h 506 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_len = h->from_where[0] << 8 | h 507 drivers/media/dvb-core/dvb_net.c h->from_where[1]; h 508 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_sndu_len & 0x8000) { h 510 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_len &= 0x7FFF; h 511 drivers/media/dvb-core/dvb_net.c h->priv->ule_dbit = 1; h 513 drivers/media/dvb-core/dvb_net.c h->priv->ule_dbit = 0; h 515 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_sndu_len < 5) { h 517 drivers/media/dvb-core/dvb_net.c h->priv->ts_count, h 518 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_len); h 519 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_errors++; h 520 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_length_errors++; h 521 drivers/media/dvb-core/dvb_net.c 
h->priv->ule_sndu_len = 0; h 522 drivers/media/dvb-core/dvb_net.c h->priv->need_pusi = 1; h 523 drivers/media/dvb-core/dvb_net.c h->new_ts = 1; h 524 drivers/media/dvb-core/dvb_net.c h->ts += TS_SZ; h 525 drivers/media/dvb-core/dvb_net.c h->priv->ts_count++; h 528 drivers/media/dvb-core/dvb_net.c h->ts_remain -= 2; /* consume the 2 bytes SNDU length. */ h 529 drivers/media/dvb-core/dvb_net.c h->from_where += 2; h 532 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_remain = h->priv->ule_sndu_len + 2; h 540 drivers/media/dvb-core/dvb_net.c switch (h->ts_remain) { h 542 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_remain--; h 543 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_type = h->from_where[0] << 8; h 546 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_type_1 = 1; h 547 drivers/media/dvb-core/dvb_net.c h->ts_remain -= 1; h 548 drivers/media/dvb-core/dvb_net.c h->from_where += 1; h 551 drivers/media/dvb-core/dvb_net.c h->new_ts = 1; h 552 drivers/media/dvb-core/dvb_net.c h->ts += TS_SZ; h 553 drivers/media/dvb-core/dvb_net.c h->priv->ts_count++; h 558 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_sndu_type_1) { h 559 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_type_1 = 0; h 560 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_type |= h->from_where[0]; h 561 drivers/media/dvb-core/dvb_net.c h->from_where += 1; /* points to payload start. */ h 562 drivers/media/dvb-core/dvb_net.c h->ts_remain -= 1; h 565 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_type = h->from_where[0] << 8 | h 566 drivers/media/dvb-core/dvb_net.c h->from_where[1]; h 567 drivers/media/dvb-core/dvb_net.c h->from_where += 2; /* points to payload start. */ h 568 drivers/media/dvb-core/dvb_net.c h->ts_remain -= 2; h 580 drivers/media/dvb-core/dvb_net.c h->priv->ule_skb = dev_alloc_skb(h->priv->ule_sndu_len + h 582 drivers/media/dvb-core/dvb_net.c if (!h->priv->ule_skb) { h 584 drivers/media/dvb-core/dvb_net.c h->dev->name); h 585 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_dropped++; h 590 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_remain = h->priv->ule_sndu_len; h 591 drivers/media/dvb-core/dvb_net.c h->priv->ule_skb->dev = h->dev; h 596 drivers/media/dvb-core/dvb_net.c skb_reserve(h->priv->ule_skb, ETH_HLEN + ETH_ALEN); h 602 drivers/media/dvb-core/dvb_net.c static int dvb_net_ule_should_drop(struct dvb_net_ule_handle *h) h 612 drivers/media/dvb-core/dvb_net.c if (h->priv->rx_mode == RX_MODE_PROMISC) h 615 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_skb->data[0] & 0x01) { h 617 drivers/media/dvb-core/dvb_net.c if (!ether_addr_equal(h->priv->ule_skb->data, bc_addr)) { h 619 drivers/media/dvb-core/dvb_net.c if (h->priv->rx_mode == RX_MODE_MULTI) { h 622 drivers/media/dvb-core/dvb_net.c for (i = 0; i < h->priv->multi_num && h 623 drivers/media/dvb-core/dvb_net.c !ether_addr_equal(h->priv->ule_skb->data, h 624 drivers/media/dvb-core/dvb_net.c h->priv->multi_macs[i]); h 627 drivers/media/dvb-core/dvb_net.c if (i == h->priv->multi_num) h 629 drivers/media/dvb-core/dvb_net.c } else if (h->priv->rx_mode != RX_MODE_ALL_MULTI) h 637 drivers/media/dvb-core/dvb_net.c } else if (!ether_addr_equal(h->priv->ule_skb->data, h->dev->dev_addr)) h 644 drivers/media/dvb-core/dvb_net.c static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h, h 652 drivers/media/dvb-core/dvb_net.c h->priv->ts_count, ule_crc, expected_crc, h 653 drivers/media/dvb-core/dvb_net.c h->priv->ule_sndu_len, h->priv->ule_sndu_type, h 654 drivers/media/dvb-core/dvb_net.c h->ts_remain, h 655 
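The dvb_net entries above decode the 16-bit ULE SNDU length word: the top bit is the D-bit (destination address omitted), the low 15 bits are the length, and anything under 5 bytes is rejected as too short. A standalone sketch of just that step; the struct and names are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    struct sndu_hdr {
        uint16_t len;   /* low 15 bits of the first word */
        int dbit;       /* 1: destination address field omitted */
    };

    /* Parse the first two SNDU bytes; 0 on success, -1 on a bad length. */
    static int parse_sndu_len(const uint8_t *p, struct sndu_hdr *out)
    {
        uint16_t len = (uint16_t)(p[0] << 8 | p[1]);

        out->dbit = (len & 0x8000) != 0;   /* D-bit rides in the top bit */
        len &= 0x7FFF;

        if (len < 5)                       /* too short to hold type + CRC */
            return -1;

        out->len = len;
        return 0;
    }

    int main(void)
    {
        const uint8_t raw[2] = { 0x81, 0x2A };   /* D-bit set, length 0x12A */
        struct sndu_hdr h;

        if (parse_sndu_len(raw, &h) == 0)
            printf("len=%u dbit=%d\n", h.len, h.dbit);
        return 0;
    }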
drivers/media/dvb-core/dvb_net.c h->ts_remain > 2 ? h 656 drivers/media/dvb-core/dvb_net.c *(unsigned short *)h->from_where : 0); h 676 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_errors++; h 677 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_crc_errors++; h 678 drivers/media/dvb-core/dvb_net.c dev_kfree_skb(h->priv->ule_skb); h 686 drivers/media/dvb-core/dvb_net.c h->priv->ule_skb->tail -= 4; h 687 drivers/media/dvb-core/dvb_net.c h->priv->ule_skb->len -= 4; h 689 drivers/media/dvb-core/dvb_net.c if (!h->priv->ule_dbit) { h 690 drivers/media/dvb-core/dvb_net.c if (dvb_net_ule_should_drop(h)) { h 691 drivers/media/dvb-core/dvb_net.c netdev_dbg(h->dev, h 693 drivers/media/dvb-core/dvb_net.c h->priv->ule_skb->data, h->dev->dev_addr); h 694 drivers/media/dvb-core/dvb_net.c dev_kfree_skb(h->priv->ule_skb); h 698 drivers/media/dvb-core/dvb_net.c skb_copy_from_linear_data(h->priv->ule_skb, dest_addr, h 700 drivers/media/dvb-core/dvb_net.c skb_pull(h->priv->ule_skb, ETH_ALEN); h 707 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_sndu_type < ETH_P_802_3_MIN) { h 709 drivers/media/dvb-core/dvb_net.c int l = handle_ule_extensions(h->priv); h 718 drivers/media/dvb-core/dvb_net.c dev_kfree_skb(h->priv->ule_skb); h 721 drivers/media/dvb-core/dvb_net.c skb_pull(h->priv->ule_skb, l); h 732 drivers/media/dvb-core/dvb_net.c if (!h->priv->ule_bridged) { h 733 drivers/media/dvb-core/dvb_net.c skb_push(h->priv->ule_skb, ETH_HLEN); h 734 drivers/media/dvb-core/dvb_net.c h->ethh = (struct ethhdr *)h->priv->ule_skb->data; h 735 drivers/media/dvb-core/dvb_net.c memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN); h 736 drivers/media/dvb-core/dvb_net.c eth_zero_addr(h->ethh->h_source); h 737 drivers/media/dvb-core/dvb_net.c h->ethh->h_proto = htons(h->priv->ule_sndu_type); h 740 drivers/media/dvb-core/dvb_net.c h->priv->ule_bridged = 0; h 743 drivers/media/dvb-core/dvb_net.c h->priv->ule_skb->protocol = dvb_net_eth_type_trans(h->priv->ule_skb, h 744 drivers/media/dvb-core/dvb_net.c h->dev); h 750 drivers/media/dvb-core/dvb_net.c if (h->priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST) h 751 drivers/media/dvb-core/dvb_net.c h->priv->ule_skb->pkt_type = PACKET_HOST; h 753 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_packets++; h 754 drivers/media/dvb-core/dvb_net.c h->dev->stats.rx_bytes += h->priv->ule_skb->len; h 755 drivers/media/dvb-core/dvb_net.c netif_rx(h->priv->ule_skb); h 761 drivers/media/dvb-core/dvb_net.c struct dvb_net_ule_handle h = { h 781 drivers/media/dvb-core/dvb_net.c for (h.ts = h.buf, h.ts_end = h.buf + h.buf_len; h 782 drivers/media/dvb-core/dvb_net.c h.ts < h.ts_end; /* no incr. 
*/) { h 783 drivers/media/dvb-core/dvb_net.c if (h.new_ts) { h 785 drivers/media/dvb-core/dvb_net.c if (dvb_net_ule_new_ts_cell(&h)) h 790 drivers/media/dvb-core/dvb_net.c if (h.priv->need_pusi) { h 791 drivers/media/dvb-core/dvb_net.c if (dvb_net_ule_ts_pusi(&h)) h 795 drivers/media/dvb-core/dvb_net.c if (h.new_ts) { h 796 drivers/media/dvb-core/dvb_net.c if (dvb_net_ule_new_ts(&h)) h 801 drivers/media/dvb-core/dvb_net.c if (h.priv->ule_skb == NULL) { h 802 drivers/media/dvb-core/dvb_net.c ret = dvb_net_ule_new_payload(&h); h 810 drivers/media/dvb-core/dvb_net.c h.how_much = min(h.priv->ule_sndu_remain, (int)h.ts_remain); h 811 drivers/media/dvb-core/dvb_net.c skb_put_data(h.priv->ule_skb, h.from_where, h.how_much); h 812 drivers/media/dvb-core/dvb_net.c h.priv->ule_sndu_remain -= h.how_much; h 813 drivers/media/dvb-core/dvb_net.c h.ts_remain -= h.how_much; h 814 drivers/media/dvb-core/dvb_net.c h.from_where += h.how_much; h 817 drivers/media/dvb-core/dvb_net.c if (h.priv->ule_sndu_remain <= 0) { h 819 drivers/media/dvb-core/dvb_net.c __be16 ulen = htons(h.priv->ule_sndu_len); h 820 drivers/media/dvb-core/dvb_net.c __be16 utype = htons(h.priv->ule_sndu_type); h 825 drivers/media/dvb-core/dvb_net.c { h.priv->ule_skb->data, h 826 drivers/media/dvb-core/dvb_net.c h.priv->ule_skb->len - 4 } h 829 drivers/media/dvb-core/dvb_net.c if (h.priv->ule_dbit) { h 836 drivers/media/dvb-core/dvb_net.c tail = skb_tail_pointer(h.priv->ule_skb); h 842 drivers/media/dvb-core/dvb_net.c dvb_net_ule_check_crc(&h, iov, ule_crc, expected_crc); h 845 drivers/media/dvb-core/dvb_net.c reset_ule(h.priv); h 849 drivers/media/dvb-core/dvb_net.c if (h.ts_remain >= 2 && *((unsigned short *)h.from_where) != 0xFFFF) { h 851 drivers/media/dvb-core/dvb_net.c h.new_ts = 0; h 852 drivers/media/dvb-core/dvb_net.c h.priv->ule_skb = NULL; h 853 drivers/media/dvb-core/dvb_net.c h.priv->ule_sndu_type_1 = 0; h 854 drivers/media/dvb-core/dvb_net.c h.priv->ule_sndu_len = 0; h 861 drivers/media/dvb-core/dvb_net.c h.new_ts = 1; h 862 drivers/media/dvb-core/dvb_net.c h.ts += TS_SZ; h 863 drivers/media/dvb-core/dvb_net.c h.priv->ts_count++; h 864 drivers/media/dvb-core/dvb_net.c if (h.priv->ule_skb == NULL) { h 865 drivers/media/dvb-core/dvb_net.c h.priv->need_pusi = 1; h 866 drivers/media/dvb-core/dvb_net.c h.priv->ule_sndu_type_1 = 0; h 867 drivers/media/dvb-core/dvb_net.c h.priv->ule_sndu_len = 0; h 1503 drivers/media/dvb-frontends/dib0090.c u8 c, h, n; h 1528 drivers/media/dvb-frontends/dib0090.c h = (e2 >> 6) & 0x3f; h 1534 drivers/media/dvb-frontends/dib0090.c if ((h >= HR_MAX) || (h <= HR_MIN)) h 1535 drivers/media/dvb-frontends/dib0090.c h = 34; h 1539 drivers/media/dvb-frontends/dib0090.c dib0090_write_reg(state, 0x13, (h << 10)); h 1540 drivers/media/dvb-frontends/dib0090.c e2 = (n << 11) | ((h >> 2)<<6) | c; h 3569 drivers/media/dvb-frontends/stv090x.c u32 reg, h, m, l; h 3578 drivers/media/dvb-frontends/stv090x.c h = STV090x_GETFIELD_Px(reg, ERR_CNT2_FIELD); h 3586 drivers/media/dvb-frontends/stv090x.c *per = ((h << 16) | (m << 8) | l); h 103 drivers/media/firewire/firedtv-fw.c __be32 *h, *h_end; h 107 drivers/media/firewire/firedtv-fw.c for (h = header, h_end = h + header_length / 4; h < h_end; h++) { h 108 drivers/media/firewire/firedtv-fw.c length = be32_to_cpup(h) >> 16; h 200 drivers/media/i2c/adv748x/adv748x.h #define adv748x_hdmi_to_state(h) container_of(h, struct adv748x_state, hdmi) h 1566 drivers/media/i2c/adv7604.c u32 w, h; h 1569 drivers/media/i2c/adv7604.c h = hdmi_read16(sd, 0x09, info->field0_height_mask); h 
1575 drivers/media/i2c/adv7604.c bt->width == w && bt->height == h) h 1581 drivers/media/i2c/adv7604.c bt->height = h; h 819 drivers/media/i2c/imx274.c int h, int ask_h, u32 flags) h 828 drivers/media/i2c/imx274.c if (h < ask_h) h 835 drivers/media/i2c/imx274.c if (h > ask_h) h 840 drivers/media/i2c/imx274.c val -= abs(h - ask_h); h 843 drivers/media/i2c/imx274.c __func__, ask_w, ask_h, w, h, val); h 304 drivers/media/i2c/max2175.c static inline struct max2175 *max2175_from_ctrl_hdl(struct v4l2_ctrl_handler *h) h 306 drivers/media/i2c/max2175.c return container_of(h, struct max2175, ctrl_hdl); h 576 drivers/media/i2c/ov2640.c #define OV2640_SIZE(n, w, h, r) \ h 577 drivers/media/i2c/ov2640.c {.name = n, .width = w , .height = h, .regs = r } h 1257 drivers/media/i2c/s5c73m3/s5c73m3-core.c unsigned w, h; h 1266 drivers/media/i2c/s5c73m3/s5c73m3-core.c h = mf->height; h 1272 drivers/media/i2c/s5c73m3/s5c73m3-core.c h = fs->height; h 1275 drivers/media/i2c/s5c73m3/s5c73m3-core.c fse->max_height = fse->min_height = h; h 309 drivers/media/i2c/saa6752hs.c struct saa6752hs_state *h) h 311 drivers/media/i2c/saa6752hs.c struct saa6752hs_mpeg_params *params = &h->params; h 360 drivers/media/i2c/saa6752hs.c struct saa6752hs_state *h = h 367 drivers/media/i2c/saa6752hs.c h->video_bitrate_peak->val < h->video_bitrate->val) h 368 drivers/media/i2c/saa6752hs.c h->video_bitrate_peak->val = h->video_bitrate->val; h 376 drivers/media/i2c/saa6752hs.c struct saa6752hs_state *h = h 378 drivers/media/i2c/saa6752hs.c struct saa6752hs_mpeg_params *params = &h->params; h 413 drivers/media/i2c/saa6752hs.c params->vi_bitrate = h->video_bitrate->val / 1000; h 414 drivers/media/i2c/saa6752hs.c params->vi_bitrate_peak = h->video_bitrate_peak->val / 1000; h 415 drivers/media/i2c/saa6752hs.c v4l2_ctrl_activate(h->video_bitrate_peak, h 427 drivers/media/i2c/saa6752hs.c struct saa6752hs_state *h = to_state(sd); h 435 drivers/media/i2c/saa6752hs.c set_reg8(client, 0x41, h->video_format); h 438 drivers/media/i2c/saa6752hs.c set_reg8(client, 0x40, (h->standard & V4L2_STD_525_60) ? 
1 : 0); h 441 drivers/media/i2c/saa6752hs.c saa6752hs_set_bitrate(client, h); h 463 drivers/media/i2c/saa6752hs.c localPAT[17] = 0xe0 | ((h->params.ts_pid_pmt >> 8) & 0x0f); h 464 drivers/media/i2c/saa6752hs.c localPAT[18] = h->params.ts_pid_pmt & 0xff; h 472 drivers/media/i2c/saa6752hs.c if (h->params.au_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3) { h 479 drivers/media/i2c/saa6752hs.c localPMT[3] = 0x40 | ((h->params.ts_pid_pmt >> 8) & 0x0f); h 480 drivers/media/i2c/saa6752hs.c localPMT[4] = h->params.ts_pid_pmt & 0xff; h 481 drivers/media/i2c/saa6752hs.c localPMT[15] = 0xE0 | ((h->params.ts_pid_pcr >> 8) & 0x0F); h 482 drivers/media/i2c/saa6752hs.c localPMT[16] = h->params.ts_pid_pcr & 0xFF; h 483 drivers/media/i2c/saa6752hs.c localPMT[20] = 0xE0 | ((h->params.ts_pid_video >> 8) & 0x0F); h 484 drivers/media/i2c/saa6752hs.c localPMT[21] = h->params.ts_pid_video & 0xFF; h 485 drivers/media/i2c/saa6752hs.c localPMT[25] = 0xE0 | ((h->params.ts_pid_audio >> 8) & 0x0F); h 486 drivers/media/i2c/saa6752hs.c localPMT[26] = h->params.ts_pid_audio & 0xFF; h 494 drivers/media/i2c/saa6752hs.c set_reg16(client, 0xc1, h->params.ts_pid_audio); h 497 drivers/media/i2c/saa6752hs.c set_reg16(client, 0xc0, h->params.ts_pid_video); h 500 drivers/media/i2c/saa6752hs.c set_reg16(client, 0xc4, h->params.ts_pid_pcr); h 529 drivers/media/i2c/saa6752hs.c switch (h->params.vi_aspect) { h 550 drivers/media/i2c/saa6752hs.c struct saa6752hs_state *h = to_state(sd); h 555 drivers/media/i2c/saa6752hs.c if (h->video_format == SAA6752HS_VF_UNKNOWN) h 556 drivers/media/i2c/saa6752hs.c h->video_format = SAA6752HS_VF_D1; h 557 drivers/media/i2c/saa6752hs.c f->width = v4l2_format_table[h->video_format].fmt.pix.width; h 558 drivers/media/i2c/saa6752hs.c f->height = v4l2_format_table[h->video_format].fmt.pix.height; h 570 drivers/media/i2c/saa6752hs.c struct saa6752hs_state *h = to_state(sd); h 618 drivers/media/i2c/saa6752hs.c h->video_format = SAA6752HS_VF_D1; h 620 drivers/media/i2c/saa6752hs.c h->video_format = SAA6752HS_VF_2_3_D1; h 622 drivers/media/i2c/saa6752hs.c h->video_format = SAA6752HS_VF_1_2_D1; h 624 drivers/media/i2c/saa6752hs.c h->video_format = SAA6752HS_VF_SIF; h 630 drivers/media/i2c/saa6752hs.c struct saa6752hs_state *h = to_state(sd); h 632 drivers/media/i2c/saa6752hs.c h->standard = std; h 665 drivers/media/i2c/saa6752hs.c struct saa6752hs_state *h; h 674 drivers/media/i2c/saa6752hs.c h = devm_kzalloc(&client->dev, sizeof(*h), GFP_KERNEL); h 675 drivers/media/i2c/saa6752hs.c if (h == NULL) h 677 drivers/media/i2c/saa6752hs.c sd = &h->sd; h 682 drivers/media/i2c/saa6752hs.c h->revision = (data[8] << 8) | data[9]; h 683 drivers/media/i2c/saa6752hs.c h->has_ac3 = 0; h 684 drivers/media/i2c/saa6752hs.c if (h->revision == 0x0206) { h 685 drivers/media/i2c/saa6752hs.c h->has_ac3 = 1; h 688 drivers/media/i2c/saa6752hs.c h->params = param_defaults; h 690 drivers/media/i2c/saa6752hs.c hdl = &h->hdl; h 694 drivers/media/i2c/saa6752hs.c h->has_ac3 ? 
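The saa6752hs entries above patch 13-bit transport-stream PIDs into prebuilt PAT/PMT byte images: the high byte carries the reserved bits (0xE0) plus the top PID bits, the low byte the remainder. The driver snippet masks the high part with 0x0f, which only covers PIDs below 0x1000; the sketch below uses the full 13-bit mask and notes the difference:

    #include <stdint.h>
    #include <stdio.h>

    /* Store a 13-bit PID at buf[i]/buf[i+1] with the three reserved bits
     * set, as done for the PCR/video/audio PID fields in the PMT above. */
    static void put_pid13(uint8_t *buf, unsigned i, uint16_t pid)
    {
        buf[i]     = 0xE0 | ((pid >> 8) & 0x1F);   /* 0x0F in the driver snippet */
        buf[i + 1] = pid & 0xFF;
    }

    int main(void)
    {
        uint8_t pmt[32] = {0};

        put_pid13(pmt, 15, 0x0100);                /* e.g. PCR PID 0x100 */
        printf("%02X %02X\n", pmt[15], pmt[16]);   /* E1 00 */
        return 0;
    }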
V4L2_MPEG_AUDIO_ENCODING_AC3 : h 705 drivers/media/i2c/saa6752hs.c if (h->has_ac3) h 730 drivers/media/i2c/saa6752hs.c h->video_bitrate_peak = v4l2_ctrl_new_std(hdl, &saa6752hs_ctrl_ops, h 740 drivers/media/i2c/saa6752hs.c h->video_bitrate_mode = v4l2_ctrl_new_std_menu(hdl, &saa6752hs_ctrl_ops, h 744 drivers/media/i2c/saa6752hs.c h->video_bitrate = v4l2_ctrl_new_std(hdl, &saa6752hs_ctrl_ops, h 761 drivers/media/i2c/saa6752hs.c v4l2_ctrl_cluster(3, &h->video_bitrate_mode); h 763 drivers/media/i2c/saa6752hs.c h->standard = 0; /* Assume 625 input lines */ h 1853 drivers/media/i2c/smiapp/smiapp-core.c int h, int ask_h, u32 flags) h 1861 drivers/media/i2c/smiapp/smiapp-core.c h &= ~1; h 1867 drivers/media/i2c/smiapp/smiapp-core.c if (h < ask_h) h 1874 drivers/media/i2c/smiapp/smiapp-core.c if (h > ask_h) h 1879 drivers/media/i2c/smiapp/smiapp-core.c val -= abs(h - ask_h); h 1885 drivers/media/i2c/smiapp/smiapp-core.c w, ask_w, h, ask_h, val); h 850 drivers/media/pci/cx18/cx18-fileops.c u32 h; h 852 drivers/media/pci/cx18/cx18-fileops.c h = cx18_find_handle(cx); h 853 drivers/media/pci/cx18/cx18-fileops.c if (h != CX18_INVALID_TASK_HANDLE) h 854 drivers/media/pci/cx18/cx18-fileops.c cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2, h, 1); h 863 drivers/media/pci/cx18/cx18-fileops.c u32 h; h 865 drivers/media/pci/cx18/cx18-fileops.c h = cx18_find_handle(cx); h 866 drivers/media/pci/cx18/cx18-fileops.c if (h != CX18_INVALID_TASK_HANDLE) { h 868 drivers/media/pci/cx18/cx18-fileops.c cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2, h, 12); h 869 drivers/media/pci/cx18/cx18-fileops.c cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2, h, 0); h 208 drivers/media/pci/cx18/cx18-ioctl.c int h = fmt->fmt.pix.height; h 215 drivers/media/pci/cx18/cx18-ioctl.c h &= ~0x1f; h 218 drivers/media/pci/cx18/cx18-ioctl.c h = min(h, cx->is_50hz ? 576 : 480); h 219 drivers/media/pci/cx18/cx18-ioctl.c h = max(h, min_h); h 222 drivers/media/pci/cx18/cx18-ioctl.c fmt->fmt.pix.height = h; h 261 drivers/media/pci/cx18/cx18-ioctl.c int w, h; h 267 drivers/media/pci/cx18/cx18-ioctl.c h = fmt->fmt.pix.height; h 269 drivers/media/pci/cx18/cx18-ioctl.c if (cx->cxhdl.width == w && cx->cxhdl.height == h && h 280 drivers/media/pci/cx18/cx18-ioctl.c s->vb_bytes_per_frame = h * 720 * 3 / 2; h 283 drivers/media/pci/cx18/cx18-ioctl.c s->vb_bytes_per_frame = h * 720 * 2; h 288 drivers/media/pci/cx18/cx18-ioctl.c format.format.height = cx->cxhdl.height = h; h 914 drivers/media/pci/cx18/cx18-ioctl.c u32 h; h 936 drivers/media/pci/cx18/cx18-ioctl.c h = cx18_find_handle(cx); h 937 drivers/media/pci/cx18/cx18-ioctl.c if (h == CX18_INVALID_TASK_HANDLE) { h 942 drivers/media/pci/cx18/cx18-ioctl.c cx18_vapi(cx, CX18_CPU_CAPTURE_PAUSE, 1, h); h 952 drivers/media/pci/cx18/cx18-ioctl.c h = cx18_find_handle(cx); h 953 drivers/media/pci/cx18/cx18-ioctl.c if (h == CX18_INVALID_TASK_HANDLE) { h 957 drivers/media/pci/cx18/cx18-ioctl.c cx18_vapi(cx, CX18_CPU_CAPTURE_RESUME, 1, h); h 472 drivers/media/pci/ivtv/ivtv-ioctl.c int h = fmt->fmt.pix.height; h 479 drivers/media/pci/ivtv/ivtv-ioctl.c h &= ~0x1f; h 482 drivers/media/pci/ivtv/ivtv-ioctl.c h = min(h, itv->is_50hz ? 
576 : 480); h 483 drivers/media/pci/ivtv/ivtv-ioctl.c h = max(h, min_h); h 486 drivers/media/pci/ivtv/ivtv-ioctl.c fmt->fmt.pix.height = h; h 520 drivers/media/pci/ivtv/ivtv-ioctl.c s32 h = fmt->fmt.pix.height; h 541 drivers/media/pci/ivtv/ivtv-ioctl.c h = min(h, 576); h 542 drivers/media/pci/ivtv/ivtv-ioctl.c h = max(h, 2); h 546 drivers/media/pci/ivtv/ivtv-ioctl.c fmt->fmt.pix.height = h; h 581 drivers/media/pci/ivtv/ivtv-ioctl.c int h = fmt->fmt.pix.height; h 586 drivers/media/pci/ivtv/ivtv-ioctl.c if (itv->cxhdl.width == w && itv->cxhdl.height == h) h 593 drivers/media/pci/ivtv/ivtv-ioctl.c itv->cxhdl.height = h; h 597 drivers/media/pci/ivtv/ivtv-ioctl.c format.format.height = h; h 192 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_PB_HSIZE(h) ((h)<<12) h 295 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_ZOOM_SX(h) (((h)/2)<<11) h 507 drivers/media/pci/ttpci/av7110.c av7110->video_size.h = h_ar & 0xfff; h 511 drivers/media/pci/ttpci/av7110.c event.u.size.h = av7110->video_size.h; h 531 drivers/media/pci/ttpci/av7110.c av7110->video_size.w, av7110->video_size.h, h 991 drivers/media/pci/ttpci/av7110_hw.c uint w, h, bpp, bpl, size, lpb, bnum, brest; h 996 drivers/media/pci/ttpci/av7110_hw.c h = y1 - y0 + 1; h 999 drivers/media/pci/ttpci/av7110_hw.c if (w <= 0 || w > 720 || h <= 0 || h > 576) h 1003 drivers/media/pci/ttpci/av7110_hw.c size = h * bpl; h 574 drivers/media/platform/atmel/atmel-isc-base.c u32 h, w; h 576 drivers/media/platform/atmel/atmel-isc-base.c h = isc->fmt.fmt.pix.height; h 586 drivers/media/platform/atmel/atmel-isc-base.c h <<= 1; h 603 drivers/media/platform/atmel/atmel-isc-base.c (ISC_PFE_CFG2_ROWMAX(h - 1) & ISC_PFE_CFG2_ROWMAX_MASK)); h 307 drivers/media/platform/coda/coda-common.c unsigned int w, h; h 312 drivers/media/platform/coda/coda-common.c h = codec->max_h; h 314 drivers/media/platform/coda/coda-common.c for (k = 0, w = 0, h = 0; k < num_codecs; k++) { h 316 drivers/media/platform/coda/coda-common.c h = max(h, codecs[k].max_h); h 323 drivers/media/platform/coda/coda-common.c *max_h = h; h 508 drivers/media/platform/exynos-gsc/gsc-core.c void gsc_check_crop_change(u32 tmp_w, u32 tmp_h, u32 *w, u32 *h) h 510 drivers/media/platform/exynos-gsc/gsc-core.c if (tmp_w != *w || tmp_h != *h) { h 512 drivers/media/platform/exynos-gsc/gsc-core.c *w, *h, tmp_w, tmp_h); h 514 drivers/media/platform/exynos-gsc/gsc-core.c *h = tmp_h; h 390 drivers/media/platform/exynos-gsc/gsc-core.h void gsc_check_crop_change(u32 tmp_w, u32 tmp_h, u32 *w, u32 *h); h 162 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c u32 *h, unsigned int hmin, h 168 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c height = *h; h 172 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0); h 175 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c if (*h < height && (*h + h_step) <= hmax) h 176 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c *h += h_step; h 189 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c pix_mp->height = q_data->h; h 244 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c u32 h = pix_mp->height * fmt->v_sample[i] / 4; h 248 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c pfmt->sizeimage = stride * h; h 281 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c pix_mp->height = q_data->h; h 382 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q_data->h = pix_mp->height; h 394 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q_data->w, q_data->h); h 466 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c s->r.height = 
ctx->out_q.h; h 473 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c s->r.height = ctx->cap_q.h; h 496 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c s->r.height = ctx->out_q.h; h 607 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c if (q_data->w != param->pic_w || q_data->h != param->pic_h) { h 630 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q_data->h = param->pic_h; h 634 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q_data->h = param->dec_h; h 969 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->h = MTK_JPEG_MIN_HEIGHT; h 977 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->h = MTK_JPEG_MIN_HEIGHT; h 981 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c u32 h = q->h * q->fmt->v_sample[i] / 4; h 984 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->sizeimage[i] = stride * h; h 99 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h u32 h; h 85 drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h int32_t h; h 153 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c u32 *h, unsigned int hmin, h 160 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c org_h = *h; h 163 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0); h 169 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c if (*h < org_h && (*h + step_h) <= hmax) h 170 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c *h += step_h; h 267 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c static void mtk_mdp_check_crop_change(u32 new_w, u32 new_h, u32 *w, u32 *h) h 269 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c if (new_w != *w || new_h != *h) { h 271 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c *w, *h, new_w, new_h); h 274 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c *h = new_h; h 85 drivers/media/platform/mtk-mdp/mtk_mdp_regs.c config->h = frame->height; h 116 drivers/media/platform/mtk-mdp/mtk_mdp_regs.c config->h = frame->height; h 36 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h #define mtk_vcodec_err(h, fmt, args...) \ h 38 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h ((struct mtk_vcodec_ctx *)h->ctx)->id, __func__, ##args) h 53 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h #define mtk_vcodec_debug(h, fmt, args...) \ h 57 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h ((struct mtk_vcodec_ctx *)h->ctx)->id, \ h 61 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h #define mtk_vcodec_debug_enter(h) mtk_vcodec_debug(h, "+") h 62 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h #define mtk_vcodec_debug_leave(h) mtk_vcodec_debug(h, "-") h 70 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h #define mtk_vcodec_debug(h, fmt, args...) 
{} h 71 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h #define mtk_vcodec_debug_enter(h) {} h 72 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h #define mtk_vcodec_debug_leave(h) {} h 749 drivers/media/platform/rcar_jpu.c u32 *h, unsigned int h_min, h 755 drivers/media/platform/rcar_jpu.c height = *h; h 759 drivers/media/platform/rcar_jpu.c v4l_bound_align_image(w, w_min, w_max, w_align, h, h_min, h_max, h 764 drivers/media/platform/rcar_jpu.c if (*h < height && *h + h_step < h_max) h 765 drivers/media/platform/rcar_jpu.c *h += h_step; h 773 drivers/media/platform/rcar_jpu.c unsigned int f_type, w, h; h 803 drivers/media/platform/rcar_jpu.c h = pix->height; h 809 drivers/media/platform/rcar_jpu.c (JPU_JPEG_MAX_BYTES_PER_PIXEL * w * h); h 824 drivers/media/platform/rcar_jpu.c pix->plane_fmt[i].sizeimage = bpl * h * fmt->bpp[i] / 8; h 1358 drivers/media/platform/rcar_jpu.c unsigned int w, h, bpl; h 1384 drivers/media/platform/rcar_jpu.c h = q_data->format.height; h 1400 drivers/media/platform/rcar_jpu.c src_2_addr = src_1_addr + w * h; h 1430 drivers/media/platform/rcar_jpu.c jpu_write(jpu, (h >> 8) & JCSZ_MASK, JCVSZU); h 1431 drivers/media/platform/rcar_jpu.c jpu_write(jpu, h & JCSZ_MASK, JCVSZD); h 1434 drivers/media/platform/rcar_jpu.c jpu_write(jpu, h, JIFESVSZ); h 1466 drivers/media/platform/rcar_jpu.c dst_2_addr = dst_1_addr + w * h; h 116 drivers/media/platform/renesas-ceu.c #define CEU_H_MAX(h) ((h) < CEU_MAX_HEIGHT ? (h) : CEU_MAX_HEIGHT) h 47 drivers/media/platform/rockchip/rga/rga-hw.c unsigned int w, unsigned int h) h 70 drivers/media/platform/rockchip/rga/rga-hw.c lb->y_off = lt->y_off + (h - 1) * frm->stride; h 71 drivers/media/platform/rockchip/rga/rga-hw.c lb->u_off = lt->u_off + (h / y_div - 1) * uv_stride; h 72 drivers/media/platform/rockchip/rga/rga-hw.c lb->v_off = lt->v_off + (h / y_div - 1) * uv_stride; h 1225 drivers/media/platform/s5p-jpeg/jpeg-core.c result->h = height; h 1349 drivers/media/platform/s5p-jpeg/jpeg-core.c pix->height = q_data->h; h 1395 drivers/media/platform/s5p-jpeg/jpeg-core.c u32 *h, unsigned int hmin, unsigned int hmax, h 1401 drivers/media/platform/s5p-jpeg/jpeg-core.c height = *h; h 1420 drivers/media/platform/s5p-jpeg/jpeg-core.c v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0); h 1424 drivers/media/platform/s5p-jpeg/jpeg-core.c if (*h < height && (*h + h_step) < hmax) h 1425 drivers/media/platform/s5p-jpeg/jpeg-core.c *h += h_step; h 1556 drivers/media/platform/s5p-jpeg/jpeg-core.c int w = pix->width, h = pix->height, wh_align; h 1572 drivers/media/platform/s5p-jpeg/jpeg-core.c &h, S5P_JPEG_MIN_HEIGHT, h 1578 drivers/media/platform/s5p-jpeg/jpeg-core.c return (w * h * fmt_depth >> 3) + padding; h 1613 drivers/media/platform/s5p-jpeg/jpeg-core.c q_data->h = pix->height; h 1628 drivers/media/platform/s5p-jpeg/jpeg-core.c q_data->size = q_data->w * q_data->h * h 1666 drivers/media/platform/s5p-jpeg/jpeg-core.c scale_rect.height = ct->out_q.h / 2; h 1712 drivers/media/platform/s5p-jpeg/jpeg-core.c h_ratio = ctx->out_q.h / r->height; h 1727 drivers/media/platform/s5p-jpeg/jpeg-core.c r->height = round_down(ctx->out_q.h / ctx->scale_factor, 2); h 1777 drivers/media/platform/s5p-jpeg/jpeg-core.c base_rect.height = ctx->out_q.h; h 1817 drivers/media/platform/s5p-jpeg/jpeg-core.c s->r.height = ctx->out_q.h; h 2069 drivers/media/platform/s5p-jpeg/jpeg-core.c s5p_jpeg_y(jpeg->regs, ctx->out_q.h); h 2134 drivers/media/platform/s5p-jpeg/jpeg-core.c pix_size = ctx->cap_q.w * ctx->cap_q.h; h 2140 
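Several drivers above (mtk-jpeg, mtk-mdp, rcar_jpu, s5p-jpeg) wrap v4l_bound_align_image() with the same fix-up: if alignment rounded the height below the caller's request, add one step back as long as that stays within the maximum. A userspace sketch of the pattern with a stub in place of the V4L2 helper:

    #include <stdio.h>

    /* Stand-in for v4l_bound_align_image(): clamp, then round down to the
     * alignment (align is a log2 step, as in the V4L2 helper). */
    static void bound_align(unsigned *v, unsigned vmin, unsigned vmax, unsigned align)
    {
        if (*v < vmin) *v = vmin;
        if (*v > vmax) *v = vmax;
        *v &= ~((1u << align) - 1);
    }

    /* Align the height, then restore one step if rounding undershot. */
    static void align_height(unsigned *h, unsigned hmin, unsigned hmax, unsigned align)
    {
        unsigned asked = *h;
        unsigned h_step = 1u << align;

        bound_align(h, hmin, hmax, align);

        if (*h < asked && (*h + h_step) <= hmax)
            *h += h_step;   /* prefer rounding up over silently shrinking */
    }

    int main(void)
    {
        unsigned h = 1082;

        align_height(&h, 16, 4096, 4);   /* 16-line alignment */
        printf("h=%u\n", h);             /* 1082 -> 1072 -> 1088 */
        return 0;
    }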
drivers/media/platform/s5p-jpeg/jpeg-core.c padding_bytes = ctx->out_q.h; h 2229 drivers/media/platform/s5p-jpeg/jpeg-core.c ctx->cap_q.h); h 2263 drivers/media/platform/s5p-jpeg/jpeg-core.c ctx->cap_q.h); h 2292 drivers/media/platform/s5p-jpeg/jpeg-core.c pix_size = ctx->cap_q.w * ctx->cap_q.h; h 2514 drivers/media/platform/s5p-jpeg/jpeg-core.c q_data->h = ctx->out_q.h; h 2527 drivers/media/platform/s5p-jpeg/jpeg-core.c &q_data->h, S5P_JPEG_MIN_HEIGHT, h 2530 drivers/media/platform/s5p-jpeg/jpeg-core.c q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3; h 2551 drivers/media/platform/s5p-jpeg/jpeg-core.c ori_h = ctx->out_q.h; h 2567 drivers/media/platform/s5p-jpeg/jpeg-core.c if (ctx->out_q.w != ori_w || ctx->out_q.h != ori_h) { h 199 drivers/media/platform/s5p-jpeg/jpeg-core.h u32 h; h 377 drivers/media/platform/s5p-mfc/regs-mfc-v6.h #define S5P_FIMV_TMV_BUFFER_SIZE_V6(w, h) (((w) + 1) * ((h) + 3) * 8) h 381 drivers/media/platform/s5p-mfc/regs-mfc-v6.h #define S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V6(w, h) (((w) * 192) + 64) h 382 drivers/media/platform/s5p-mfc/regs-mfc-v6.h #define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(w, h) \ h 383 drivers/media/platform/s5p-mfc/regs-mfc-v6.h ((w) * 144 + 8192 * (h) + 49216 + 1048576) h 384 drivers/media/platform/s5p-mfc/regs-mfc-v6.h #define S5P_FIMV_SCRATCH_BUF_SIZE_VC1_DEC_V6(w, h) \ h 385 drivers/media/platform/s5p-mfc/regs-mfc-v6.h (2096 * ((w) + (h) + 1)) h 386 drivers/media/platform/s5p-mfc/regs-mfc-v6.h #define S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(w, h) \ h 387 drivers/media/platform/s5p-mfc/regs-mfc-v6.h S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(w, h) h 388 drivers/media/platform/s5p-mfc/regs-mfc-v6.h #define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V6(w, h) \ h 389 drivers/media/platform/s5p-mfc/regs-mfc-v6.h ((w) * 32 + (h) * 128 + (((w) + 1) / 2) * 64 + 2112) h 390 drivers/media/platform/s5p-mfc/regs-mfc-v6.h #define S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V6(w, h) \ h 392 drivers/media/platform/s5p-mfc/regs-mfc-v6.h #define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_ENC_V6(w, h) \ h 50 drivers/media/platform/s5p-mfc/regs-mfc-v7.h #define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V7(w, h) \ h 51 drivers/media/platform/s5p-mfc/regs-mfc-v7.h (SZ_1M + ((w) * 144) + (8192 * (h)) + 49216) h 53 drivers/media/platform/s5p-mfc/regs-mfc-v7.h #define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V7(w, h) \ h 55 drivers/media/platform/s5p-mfc/regs-mfc-v7.h ((((((w) * 16) * ((h) * 16)) * 3) / 2) * 4)) h 99 drivers/media/platform/s5p-mfc/regs-mfc-v8.h #define S5P_FIMV_TMV_BUFFER_SIZE_V8(w, h) (((w) + 1) * ((h) + 1) * 8) h 101 drivers/media/platform/s5p-mfc/regs-mfc-v8.h #define S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V8(w, h) (((w) * 704) + 2176) h 102 drivers/media/platform/s5p-mfc/regs-mfc-v8.h #define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V8(w, h) \ h 103 drivers/media/platform/s5p-mfc/regs-mfc-v8.h (((w) * 576 + (h) * 128) + 4128) h 105 drivers/media/platform/s5p-mfc/regs-mfc-v8.h #define S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V8(w, h) \ h 107 drivers/media/platform/s5p-mfc/regs-mfc-v8.h #define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V8(w, h) \ h 109 drivers/media/platform/s5p-mfc/regs-mfc-v8.h ((((((w) * 16) * ((h) * 16)) * 3) / 2) * 4)) h 41 drivers/media/platform/sti/delta/delta-v4l2.c static inline int frame_size(u32 w, u32 h, u32 fmt) h 45 drivers/media/platform/sti/delta/delta-v4l2.c return (w * h * 3) / 2; h 231 drivers/media/platform/sti/delta/delta-v4l2.c static inline int estimated_au_size(u32 w, u32 h) h 239 drivers/media/platform/sti/delta/delta-v4l2.c return (w * h); h 21 
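The delta entries above size a semi-planar 4:2:0 frame as w * h * 3 / 2 bytes, and the hva macros just below round dimensions up to 16x16 macroblocks (MB_H(h) is (h + 0xF) / 0x10). A short worked example:

    #include <stdio.h>

    #define MB(x)                (((x) + 0xF) / 0x10)   /* round up to 16-pel macroblocks */
    #define FRAME_SIZE_420(w, h) ((w) * (h) * 3 / 2)    /* luma + 2 half-res chroma planes */

    int main(void)
    {
        unsigned w = 1280, h = 720;

        printf("4:2:0 frame: %u bytes\n", FRAME_SIZE_420(w, h));  /* 1382400 */
        printf("macroblocks: %ux%u\n", MB(w), MB(h));             /* 80x45 */
        return 0;
    }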
drivers/media/platform/sti/hva/hva-h264.c #define MB_H(h) ((h + 0xF) / 0x10) h 24 drivers/media/platform/sti/hva/hva-h264.c #define DATA_SIZE(w, h) (MB_W(w) * MB_H(h) * 16) h 47 drivers/media/platform/sti/hva/hva-v4l2.c static inline int frame_size(u32 w, u32 h, u32 fmt) h 52 drivers/media/platform/sti/hva/hva-v4l2.c return (w * h * 3) / 2; h 81 drivers/media/platform/sti/hva/hva-v4l2.c static inline int estimated_stream_size(u32 w, u32 h) h 88 drivers/media/platform/sti/hva/hva-v4l2.c return (w * h * 3) / 4; h 18 drivers/media/platform/sti/hva/hva.h #define hva_to_dev(h) (h->dev) h 50 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h #define CSI_WIN_CTRL_H_ACTIVE(h) ((h) << 16) h 204 drivers/media/platform/ti-vpe/vpdma.h struct vpdma_adb_hdr *h; \ h 206 drivers/media/platform/ti-vpe/vpdma.h h = MMR_ADB_ADDR(buf, str, hdr); \ h 207 drivers/media/platform/ti-vpe/vpdma.h h->offset = (offset_a); \ h 208 drivers/media/platform/ti-vpe/vpdma.h h->nwords = sizeof(adb->regs) >> 2; \ h 928 drivers/media/platform/vicodec/codec-fwht.c u32 h = height; h 932 drivers/media/platform/vicodec/codec-fwht.c h /= 2; h 936 drivers/media/platform/vicodec/codec-fwht.c if (!decode_plane(cf, &rlco, h, w, ref->cb, ref_chroma_stride, h 942 drivers/media/platform/vicodec/codec-fwht.c if (!decode_plane(cf, &rlco, h, w, ref->cr, ref_chroma_stride, h 507 drivers/media/platform/vicodec/vicodec-core.c unsigned int h = ntohl(p_hdr->height); h 511 drivers/media/platform/vicodec/vicodec-core.c if (w < MIN_WIDTH || w > MAX_WIDTH || h < MIN_HEIGHT || h > MAX_HEIGHT) h 222 drivers/media/platform/vivid/vivid-kthread-cap.c unsigned p, unsigned bpl[TPG_MAX_PLANES], unsigned h) h 231 drivers/media/platform/vivid/vivid-kthread-cap.c vbuf += bpl[i] * h / tpg->vdownsampling[i]; h 88 drivers/media/platform/vivid/vivid-vid-cap.c unsigned h = dev->fmt_cap_rect.height; h 116 drivers/media/platform/vivid/vivid-vid-cap.c if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h + h 122 drivers/media/platform/vivid/vivid-vid-cap.c sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) / h 561 drivers/media/platform/vivid/vivid-vid-cap.c unsigned w, h; h 580 drivers/media/platform/vivid/vivid-vid-cap.c h = sz->height; h 583 drivers/media/platform/vivid/vivid-vid-cap.c h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 
480 : 576; h 586 drivers/media/platform/vivid/vivid-vid-cap.c h = dev->src_rect.height; h 593 drivers/media/platform/vivid/vivid-vid-cap.c mp->height = h / factor; h 600 drivers/media/platform/vivid/vivid-vid-cap.c struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h }; h 865 drivers/media/platform/vivid/vivid-vid-common.c unsigned h = r->height; h 869 drivers/media/platform/vivid/vivid-vid-common.c h &= 0xffff; h 872 drivers/media/platform/vivid/vivid-vid-common.c h++; h 875 drivers/media/platform/vivid/vivid-vid-common.c if (h < 2) h 876 drivers/media/platform/vivid/vivid-vid-common.c h = 2; h 881 drivers/media/platform/vivid/vivid-vid-common.c if (h > MAX_HEIGHT) h 882 drivers/media/platform/vivid/vivid-vid-common.c h = MAX_HEIGHT; h 885 drivers/media/platform/vivid/vivid-vid-common.c h = h & ~1; h 886 drivers/media/platform/vivid/vivid-vid-common.c if (w < 2 || h < 2) h 888 drivers/media/platform/vivid/vivid-vid-common.c if (w > MAX_WIDTH || h > MAX_HEIGHT) h 899 drivers/media/platform/vivid/vivid-vid-common.c if (r->top + h > MAX_HEIGHT) h 900 drivers/media/platform/vivid/vivid-vid-common.c r->top = MAX_HEIGHT - h; h 903 drivers/media/platform/vivid/vivid-vid-common.c (r->width != w || r->height != h)) h 906 drivers/media/platform/vivid/vivid-vid-common.c r->height = h; h 30 drivers/media/platform/vivid/vivid-vid-out.c unsigned h = dev->fmt_out_rect.height; h 31 drivers/media/platform/vivid/vivid-vid-out.c unsigned int size = dev->bytesperline_out[0] * h + vfmt->data_offset[0]; h 35 drivers/media/platform/vivid/vivid-vid-out.c size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p] + h 66 drivers/media/platform/vivid/vivid-vid-out.c if (sizes[p] < dev->bytesperline_out[p] * h + h 72 drivers/media/platform/vivid/vivid-vid-out.c sizes[p] = p ? dev->bytesperline_out[p] * h + h 107 drivers/media/platform/vivid/vivid-vid-out.c unsigned int h = dev->fmt_out_rect.height; h 108 drivers/media/platform/vivid/vivid-vid-out.c unsigned int size = dev->bytesperline_out[0] * h; h 112 drivers/media/platform/vivid/vivid-vid-out.c size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p]; h 130 drivers/media/platform/vivid/vivid-vid-out.c size = dev->bytesperline_out[p] * h; h 359 drivers/media/platform/vivid/vivid-vid-out.c unsigned w, h; h 373 drivers/media/platform/vivid/vivid-vid-out.c h = (dev->std_out & V4L2_STD_525_60) ? 480 : 576; h 376 drivers/media/platform/vivid/vivid-vid-out.c h = dev->sink_rect.height; h 382 drivers/media/platform/vivid/vivid-vid-out.c mp->height = h / factor; h 389 drivers/media/platform/vivid/vivid-vid-out.c struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h }; h 748 drivers/media/usb/au0828/au0828-video.c int ret, h, w; h 766 drivers/media/usb/au0828/au0828-video.c h = d->height / 2 + 2; h 779 drivers/media/usb/au0828/au0828-video.c au0828_writereg(d, 0x116, h & 0xff); h 780 drivers/media/usb/au0828/au0828-video.c au0828_writereg(d, 0x117, h >> 8); h 296 drivers/media/usb/em28xx/em28xx-video.c static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v) h 302 drivers/media/usb/em28xx/em28xx-video.c mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00); h 306 drivers/media/usb/em28xx/em28xx-video.c buf[0] = h; h 307 drivers/media/usb/em28xx/em28xx-video.c buf[1] = h >> 8; h 317 drivers/media/usb/em28xx/em28xx-video.c mode = (h || v) ? 
0x30 : 0x00; h 561 drivers/media/usb/gspca/cpia1.c u8 e, u8 f, u8 g, u8 h, h 577 drivers/media/usb/gspca/cpia1.c gspca_dev->usb_buf[3] = h; h 54 drivers/media/usb/gspca/gspca.c __u32 pixfmt, int w, int h) h 63 drivers/media/usb/gspca/gspca.c w, h); h 68 drivers/media/usb/gspca/gspca.c w, h); h 1045 drivers/media/usb/gspca/gspca.c int w, h, mode, mode2; h 1048 drivers/media/usb/gspca/gspca.c h = fmt->fmt.pix.height; h 1051 drivers/media/usb/gspca/gspca.c fmt->fmt.pix.pixelformat, w, h); h 1054 drivers/media/usb/gspca/gspca.c mode = wxh_to_nearest_mode(gspca_dev, w, h, fmt->fmt.pix.pixelformat); h 1070 drivers/media/usb/gspca/gspca.c fmt->fmt.pix.height = h; h 1694 drivers/media/usb/gspca/nw80x.c int w, h; h 1709 drivers/media/usb/gspca/nw80x.c h = (gspca_dev->usb_buf[5] << 8) + gspca_dev->usb_buf[4] h 1711 drivers/media/usb/gspca/nw80x.c sd->ae_res = h * w; h 59 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c int w, h; h 319 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c hdcs->h = height; h 418 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c int h = vf->fmt.pix.height; h 436 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c if (h == -1) h 437 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c h = ldef; h 438 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c else if (h < lmin) h 439 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c h = lmin; h 440 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c else if (h > lmax) h 441 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c h = lmax; h 446 drivers/media/usb/pvrusb2/pvrusb2-v4l2.c vf->fmt.pix.height = h; h 662 drivers/media/usb/s2255/s2255drv.c int h = vc->height; h 671 drivers/media/usb/s2255/s2255drv.c (h < norm_minh(vc)) || h 672 drivers/media/usb/s2255/s2255drv.c (h > norm_maxh(vc))) { h 676 drivers/media/usb/s2255/s2255drv.c size = w * h * (vc->fmt->depth >> 3); h 880 drivers/media/usb/stkwebcam/stk-webcam.c unsigned h; h 883 drivers/media/usb/stkwebcam/stk-webcam.c { .w = 1280, .h = 1024, .m = MODE_SXGA, }, h 884 drivers/media/usb/stkwebcam/stk-webcam.c { .w = 640, .h = 480, .m = MODE_VGA, }, h 885 drivers/media/usb/stkwebcam/stk-webcam.c { .w = 352, .h = 288, .m = MODE_CIF, }, h 886 drivers/media/usb/stkwebcam/stk-webcam.c { .w = 320, .h = 240, .m = MODE_QVGA, }, h 887 drivers/media/usb/stkwebcam/stk-webcam.c { .w = 176, .h = 144, .m = MODE_QCIF, }, h 905 drivers/media/usb/stkwebcam/stk-webcam.c pix_format->height = stk_sizes[i].h; h 939 drivers/media/usb/stkwebcam/stk-webcam.c fmtd->fmt.pix.height = stk_sizes[i-1].h; h 944 drivers/media/usb/stkwebcam/stk-webcam.c fmtd->fmt.pix.height = stk_sizes[i].h; h 997 drivers/media/usb/stkwebcam/stk-webcam.c (stk_sizes[i].h >> 8) & 0xff); h 999 drivers/media/usb/stkwebcam/stk-webcam.c stk_sizes[i].h & 0xff); h 1188 drivers/media/usb/stkwebcam/stk-webcam.c frms->discrete.height = stk_sizes[frms->index].h; h 196 drivers/media/usb/uvc/uvc_v4l2.c u16 h = format->frame[i].wHeight; h 198 drivers/media/usb/uvc/uvc_v4l2.c d = min(w, rw) * min(h, rh); h 199 drivers/media/usb/uvc/uvc_v4l2.c d = w*h + rw*rh - 2*d; h 122 drivers/media/v4l2-core/v4l2-common.c u32 *h, unsigned int hmin, unsigned int hmax, h 126 drivers/media/v4l2-core/v4l2-common.c *h = clamp_align(*h, hmin, hmax, halign); h 134 drivers/media/v4l2-core/v4l2-common.c halign = __ffs(*h); h 149 drivers/media/v4l2-core/v4l2-common.c *h = clamp_align(*h, hmin, hmax, halign + 1); h 150 drivers/media/v4l2-core/v4l2-common.c halign = __ffs(*h); h 3301 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ctrl_helper *h; h 3305 drivers/media/v4l2-core/v4l2-ctrls.c for (i = 0, h = helpers; i < cs->count; i++, h++) { h 3335 
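The uvc_v4l2 entries above rank candidate frame sizes against a requested rw x rh with d = w*h + rw*rh - 2*min(w,rw)*min(h,rh): zero for an exact match, growing with the mismatched area. A sketch that picks the nearest entry from a size table:

    #include <stdio.h>

    struct size { unsigned w, h; };

    static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

    /* Distance used above: the two areas minus twice their overlap (with one
     * rectangle anchored inside the other); 0 means an exact match. */
    static unsigned dist(unsigned w, unsigned h, unsigned rw, unsigned rh)
    {
        unsigned ov = min_u(w, rw) * min_u(h, rh);

        return w * h + rw * rh - 2 * ov;
    }

    int main(void)
    {
        static const struct size tbl[] = {
            { 1920, 1080 }, { 1280, 720 }, { 640, 480 }, { 320, 240 },
        };
        unsigned rw = 800, rh = 600, best = 0, i;

        for (i = 1; i < sizeof(tbl) / sizeof(tbl[0]); i++)
            if (dist(tbl[i].w, tbl[i].h, rw, rh) <
                dist(tbl[best].w, tbl[best].h, rw, rh))
                best = i;

        printf("nearest: %ux%u\n", tbl[best].w, tbl[best].h);   /* 640x480 */
        return 0;
    }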
drivers/media/v4l2-core/v4l2-ctrls.c h->ref = ref; h 3366 drivers/media/v4l2-core/v4l2-ctrls.c h->mref = ref; h 3370 drivers/media/v4l2-core/v4l2-ctrls.c h->next = 0; h 3387 drivers/media/v4l2-core/v4l2-ctrls.c for (i = 0, h = helpers; i < cs->count; i++, h++) { h 3388 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ctrl_ref *mref = h->mref; h 3399 drivers/media/v4l2-core/v4l2-ctrls.c h->mref = NULL; h 3402 drivers/media/v4l2-core/v4l2-ctrls.c mref->helper = h; h 1269 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *h; h 1272 drivers/message/fusion/mptscsih.c h = shost_priv(SChost); h 1274 drivers/message/fusion/mptscsih.c if (h->info_kbuf == NULL) h 1275 drivers/message/fusion/mptscsih.c if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) h 1276 drivers/message/fusion/mptscsih.c return h->info_kbuf; h 1277 drivers/message/fusion/mptscsih.c h->info_kbuf[0] = '\0'; h 1279 drivers/message/fusion/mptscsih.c mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); h 1280 drivers/message/fusion/mptscsih.c h->info_kbuf[size-1] = '\0'; h 1282 drivers/message/fusion/mptscsih.c return h->info_kbuf; h 186 drivers/mfd/arizona-irq.c static int arizona_irq_map(struct irq_domain *h, unsigned int virq, h 189 drivers/mfd/arizona-irq.c struct arizona *data = h->host_data; h 140 drivers/mfd/tps65217.c static int tps65217_irq_map(struct irq_domain *h, unsigned int virq, h 143 drivers/mfd/tps65217.c struct tps65217 *tps = h->host_data; h 291 drivers/mfd/tps6586x.c static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq, h 294 drivers/mfd/tps6586x.c struct tps6586x *tps6586x = h->host_data; h 544 drivers/mfd/wm831x-irq.c static int wm831x_irq_map(struct irq_domain *h, unsigned int virq, h 547 drivers/mfd/wm831x-irq.c irq_set_chip_data(virq, h->host_data); h 162 drivers/mfd/wm8994-irq.c static int wm8994_edge_irq_map(struct irq_domain *h, unsigned int virq, h 165 drivers/mfd/wm8994-irq.c struct wm8994 *wm8994 = h->host_data; h 349 drivers/misc/sgi-gru/gru_instructions.h #define CB_IMA(h) ((h) | IMA_UNMAPPED) h 25 drivers/misc/sgi-gru/gruhandles.c #define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3) h 40 drivers/misc/sgi-gru/gruhandles.c static void start_instruction(void *h) h 42 drivers/misc/sgi-gru/gruhandles.c unsigned long *w0 = h; h 46 drivers/misc/sgi-gru/gruhandles.c gru_flush_cache(h); h 49 drivers/misc/sgi-gru/gruhandles.c static void report_instruction_timeout(void *h) h 51 drivers/misc/sgi-gru/gruhandles.c unsigned long goff = GSEGPOFF((unsigned long)h); h 61 drivers/misc/sgi-gru/gruhandles.c panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id); h 64 drivers/misc/sgi-gru/gruhandles.c static int wait_instruction_complete(void *h, enum mcs_op opc) h 71 drivers/misc/sgi-gru/gruhandles.c status = GET_MSEG_HANDLE_STATUS(h); h 75 drivers/misc/sgi-gru/gruhandles.c report_instruction_timeout(h); h 76 drivers/misc/sgi-gru/gruhandles.h #define GSEGPOFF(h) ((h) & (GRU_SIZE - 1)) h 79 drivers/misc/sgi-gru/gruhandles.h #define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1))) h 82 drivers/misc/sgi-gru/gruhandles.h #define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \ h 84 drivers/misc/sgi-gru/gruhandles.h (((h) & (GRU_HANDLE_STRIDE - 1)) == 0)) h 86 drivers/misc/sgi-gru/grukservices.c #define ASYNC_HAN_TO_BID(h) ((h) - 1) h 88 drivers/misc/sgi-gru/grukservices.c #define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)] h 133 drivers/misc/sgi-gru/grukservices.c #define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h])) h 551 
drivers/misc/sgi-gru/grutables.h static inline int __trylock_handle(void *h) h 553 drivers/misc/sgi-gru/grutables.h return !test_and_set_bit(1, h); h 556 drivers/misc/sgi-gru/grutables.h static inline void __lock_handle(void *h) h 558 drivers/misc/sgi-gru/grutables.h while (test_and_set_bit(1, h)) h 562 drivers/misc/sgi-gru/grutables.h static inline void __unlock_handle(void *h) h 564 drivers/misc/sgi-gru/grutables.h clear_bit(1, h); h 138 drivers/misc/vmw_vmci/vmci_queue_pair.c } h; /* Used by the host. */ h 346 drivers/misc/vmw_vmci/vmci_queue_pair.c va = kmap(kernel_if->u.h.page[page_index]); h 360 drivers/misc/vmw_vmci/vmci_queue_pair.c kunmap(kernel_if->u.h.page[page_index]); h 365 drivers/misc/vmw_vmci/vmci_queue_pair.c kunmap(kernel_if->u.h.page[page_index]); h 394 drivers/misc/vmw_vmci/vmci_queue_pair.c va = kmap(kernel_if->u.h.page[page_index]); h 408 drivers/misc/vmw_vmci/vmci_queue_pair.c kunmap(kernel_if->u.h.page[page_index]); h 413 drivers/misc/vmw_vmci/vmci_queue_pair.c kunmap(kernel_if->u.h.page[page_index]); h 535 drivers/misc/vmw_vmci/vmci_queue_pair.c sizeof(*queue->kernel_if->u.h.page)) h 538 drivers/misc/vmw_vmci/vmci_queue_pair.c queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); h 548 drivers/misc/vmw_vmci/vmci_queue_pair.c queue->kernel_if->u.h.header_page = h 550 drivers/misc/vmw_vmci/vmci_queue_pair.c queue->kernel_if->u.h.page = h 551 drivers/misc/vmw_vmci/vmci_queue_pair.c &queue->kernel_if->u.h.header_page[1]; h 656 drivers/misc/vmw_vmci/vmci_queue_pair.c produce_q->kernel_if->u.h.header_page); h 660 drivers/misc/vmw_vmci/vmci_queue_pair.c qp_release_pages(produce_q->kernel_if->u.h.header_page, h 669 drivers/misc/vmw_vmci/vmci_queue_pair.c consume_q->kernel_if->u.h.header_page); h 673 drivers/misc/vmw_vmci/vmci_queue_pair.c qp_release_pages(consume_q->kernel_if->u.h.header_page, h 675 drivers/misc/vmw_vmci/vmci_queue_pair.c qp_release_pages(produce_q->kernel_if->u.h.header_page, h 716 drivers/misc/vmw_vmci/vmci_queue_pair.c qp_release_pages(produce_q->kernel_if->u.h.header_page, h 718 drivers/misc/vmw_vmci/vmci_queue_pair.c memset(produce_q->kernel_if->u.h.header_page, 0, h 719 drivers/misc/vmw_vmci/vmci_queue_pair.c sizeof(*produce_q->kernel_if->u.h.header_page) * h 721 drivers/misc/vmw_vmci/vmci_queue_pair.c qp_release_pages(consume_q->kernel_if->u.h.header_page, h 723 drivers/misc/vmw_vmci/vmci_queue_pair.c memset(consume_q->kernel_if->u.h.header_page, 0, h 724 drivers/misc/vmw_vmci/vmci_queue_pair.c sizeof(*consume_q->kernel_if->u.h.header_page) * h 747 drivers/misc/vmw_vmci/vmci_queue_pair.c if (produce_q->kernel_if->u.h.header_page == NULL || h 748 drivers/misc/vmw_vmci/vmci_queue_pair.c *produce_q->kernel_if->u.h.header_page == NULL) h 751 drivers/misc/vmw_vmci/vmci_queue_pair.c headers[0] = *produce_q->kernel_if->u.h.header_page; h 752 drivers/misc/vmw_vmci/vmci_queue_pair.c headers[1] = *consume_q->kernel_if->u.h.header_page; h 138 drivers/mmc/host/au1xmmc.c #define HOST_STATUS(h) ((h)->iobase + SD_STATUS) h 139 drivers/mmc/host/au1xmmc.c #define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG) h 140 drivers/mmc/host/au1xmmc.c #define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE) h 141 drivers/mmc/host/au1xmmc.c #define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT) h 142 drivers/mmc/host/au1xmmc.c #define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT) h 143 drivers/mmc/host/au1xmmc.c #define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG) h 144 drivers/mmc/host/au1xmmc.c #define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE) h 145 drivers/mmc/host/au1xmmc.c #define 
HOST_CMD(h) ((h)->iobase + SD_CMD) h 146 drivers/mmc/host/au1xmmc.c #define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2) h 147 drivers/mmc/host/au1xmmc.c #define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT) h 148 drivers/mmc/host/au1xmmc.c #define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG) h 150 drivers/mmc/host/au1xmmc.c #define DMA_CHANNEL(h) \ h 151 drivers/mmc/host/au1xmmc.c (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan) h 49 drivers/mmc/host/meson-gx-mmc.c #define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask) h 50 drivers/mmc/host/meson-gx-mmc.c #define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask) h 51 drivers/mmc/host/meson-gx-mmc.c #define CLK_ALWAYS_ON(h) (h->data->always_on) h 414 drivers/mmc/host/mmci.h int (*get_rx_fifocnt)(struct mmci_host *h, u32 status, int remain); h 70 drivers/mtd/maps/ixp4xx.c #define BYTE0(h) ((h) & 0xFF) h 71 drivers/mtd/maps/ixp4xx.c #define BYTE1(h) (((h) >> 8) & 0xFF) h 85 drivers/mtd/maps/ixp4xx.c #define BYTE0(h) (((h) >> 8) & 0xFF) h 86 drivers/mtd/maps/ixp4xx.c #define BYTE1(h) ((h) & 0xFF) h 775 drivers/mtd/mtdswap.c unsigned int h, x, y, dist, base; h 792 drivers/mtd/mtdswap.c h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2; h 796 drivers/mtd/mtdswap.c y = (x * h + base / 2) / base; h 188 drivers/net/ethernet/aquantia/atlantic/aq_ring.c static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i, h 191 drivers/net/ethernet/aquantia/atlantic/aq_ring.c return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t)); h 325 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c unsigned int h = 0U; h 332 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c h = (mac_addr[0] << 8) | (mac_addr[1]); h 338 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC); h 786 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); h 797 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c h, h 367 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c unsigned int h = 0U; h 374 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c h = (mac_addr[0] << 8) | (mac_addr[1]); h 380 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC); h 858 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); h 868 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c h, HW_ATL_B0_MAC_MIN + i); h 653 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c u32 h = 0U; h 688 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c h = 0x8001300EU; h 697 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c mac[1] = (u8)(0xFFU & h); h 698 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c h >>= 8; h 699 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c mac[0] = (u8)(0xFFU & h); h 248 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c u32 h = 0U; h 272 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c h = 0x8001300EU; h 281 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c mac[1] = (u8)(0xFFU & h); h 282 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c h >>= 8; h 283 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c mac[0] = (u8)(0xFFU & h); h 397 drivers/net/ethernet/calxeda/xgmac.c #define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) h 398 drivers/net/ethernet/calxeda/xgmac.c #define dma_ring_cnt(h, t, 
s) CIRC_CNT(h, t, s) h 811 drivers/net/ethernet/cavium/liquidio/octeon_console.c struct octeon_firmware_file_header *h; h 827 drivers/net/ethernet/cavium/liquidio/octeon_console.c h = (struct octeon_firmware_file_header *)data; h 829 drivers/net/ethernet/cavium/liquidio/octeon_console.c if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) { h 837 drivers/net/ethernet/cavium/liquidio/octeon_console.c if (crc32_result != be32_to_cpu(h->crc32)) { h 839 drivers/net/ethernet/cavium/liquidio/octeon_console.c crc32_result, be32_to_cpu(h->crc32)); h 843 drivers/net/ethernet/cavium/liquidio/octeon_console.c if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) { h 845 drivers/net/ethernet/cavium/liquidio/octeon_console.c LIQUIDIO_PACKAGE, h->version); h 849 drivers/net/ethernet/cavium/liquidio/octeon_console.c if (memcmp(LIQUIDIO_BASE_VERSION, h->version + strlen(LIQUIDIO_PACKAGE), h 853 drivers/net/ethernet/cavium/liquidio/octeon_console.c h->version + strlen(LIQUIDIO_PACKAGE)); h 857 drivers/net/ethernet/cavium/liquidio/octeon_console.c if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) { h 859 drivers/net/ethernet/cavium/liquidio/octeon_console.c be32_to_cpu(h->num_images)); h 863 drivers/net/ethernet/cavium/liquidio/octeon_console.c dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version); h 865 drivers/net/ethernet/cavium/liquidio/octeon_console.c h->version); h 870 drivers/net/ethernet/cavium/liquidio/octeon_console.c be32_to_cpu(h->num_images)); h 872 drivers/net/ethernet/cavium/liquidio/octeon_console.c for (i = 0; i < be32_to_cpu(h->num_images); i++) { h 873 drivers/net/ethernet/cavium/liquidio/octeon_console.c load_addr = be64_to_cpu(h->desc[i].addr); h 874 drivers/net/ethernet/cavium/liquidio/octeon_console.c image_len = be32_to_cpu(h->desc[i].len); h 909 drivers/net/ethernet/cavium/liquidio/octeon_console.c if ((sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd))) < h 914 drivers/net/ethernet/cavium/liquidio/octeon_console.c strncat(h->bootcmd, boottime, h 915 drivers/net/ethernet/cavium/liquidio/octeon_console.c sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd))); h 918 drivers/net/ethernet/cavium/liquidio/octeon_console.c h->bootcmd); h 921 drivers/net/ethernet/cavium/liquidio/octeon_console.c ret = octeon_console_send_cmd(oct, h->bootcmd, 50); h 1005 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h) h 1008 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c cpl_handlers[opcode] = h ? 
h : do_bad_cpl; h 140 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h); h 156 drivers/net/ethernet/cisco/enic/enic_clsf.c static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h, h 161 drivers/net/ethernet/cisco/enic/enic_clsf.c hlist_for_each_entry(tpos, h, node) h 406 drivers/net/ethernet/freescale/ucc_geth.c out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); h 430 drivers/net/ethernet/freescale/ucc_geth.c set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); h 658 drivers/net/ethernet/freescale/ucc_geth.h u16 h; /* address (MSB) */ h 237 drivers/net/ethernet/hisilicon/hns/hnae.c static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q, h 243 drivers/net/ethernet/hisilicon/hns/hnae.c q->handle = h; h 370 drivers/net/ethernet/hisilicon/hns/hnae.c void hnae_put_handle(struct hnae_handle *h) h 372 drivers/net/ethernet/hisilicon/hns/hnae.c struct hnae_ae_dev *dev = h->dev; h 375 drivers/net/ethernet/hisilicon/hns/hnae.c for (i = 0; i < h->q_num; i++) h 376 drivers/net/ethernet/hisilicon/hns/hnae.c hnae_fini_queue(h->qs[i]); h 378 drivers/net/ethernet/hisilicon/hns/hnae.c if (h->dev->ops->reset) h 379 drivers/net/ethernet/hisilicon/hns/hnae.c h->dev->ops->reset(h); h 381 drivers/net/ethernet/hisilicon/hns/hnae.c hnae_list_del(&dev->lock, &h->node); h 384 drivers/net/ethernet/hisilicon/hns/hnae.c dev->ops->put_handle(h); h 665 drivers/net/ethernet/hisilicon/hns/hnae.h static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h) h 670 drivers/net/ethernet/hisilicon/hns/hnae.h for (i = 0; i < h->q_num; i++) { h 671 drivers/net/ethernet/hisilicon/hns/hnae.h ring = &h->qs[i]->rx_ring; h 680 drivers/net/ethernet/hisilicon/hns/hnae.h static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h) h 685 drivers/net/ethernet/hisilicon/hns/hnae.h for (i = 0; i < h->q_num; i++) { h 686 drivers/net/ethernet/hisilicon/hns/hnae.h ring = &h->qs[i]->rx_ring; h 930 drivers/net/ethernet/hisilicon/hns/hns_enet.c static int is_valid_clean_head(struct hnae_ring *ring, int h) h 935 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (unlikely(h > ring->desc_num)) h 940 drivers/net/ethernet/hisilicon/hns/hns_enet.c assert(u != c && h != c); /* must be checked before call this func */ h 942 drivers/net/ethernet/hisilicon/hns/hns_enet.c return u > c ? 
(h > c && h <= u) : (h > c || h <= u); h 1092 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1101 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed, h 1109 drivers/net/ethernet/hisilicon/hns/hns_enet.c h->dev->ops->adjust_link(h, ndev->phydev->speed, h 1115 drivers/net/ethernet/hisilicon/hns/hns_enet.c state = state && h->dev->ops->get_status(h); h 1136 drivers/net/ethernet/hisilicon/hns/hns_enet.c int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) h 1139 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct phy_device *phy_dev = h->phy_dev; h 1142 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (!h->phy_dev) h 1145 drivers/net/ethernet/hisilicon/hns/hns_enet.c ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); h 1149 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->phy_if == PHY_INTERFACE_MODE_XGMII) h 1152 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { h 1156 drivers/net/ethernet/hisilicon/hns/hns_enet.c h->phy_if); h 1158 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if); h 1171 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1176 drivers/net/ethernet/hisilicon/hns/hns_enet.c h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0); h 1184 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1191 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data); h 1205 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1207 drivers/net/ethernet/hisilicon/hns/hns_enet.c h->dev->ops->update_stats(h, &netdev->stats); h 1225 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1227 drivers/net/ethernet/hisilicon/hns/hns_enet.c h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1); h 1278 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1284 drivers/net/ethernet/hisilicon/hns/hns_enet.c for (i = 0; i < h->q_num * 2; i++) { h 1305 drivers/net/ethernet/hisilicon/hns/hns_enet.c cpu = hns_nic_init_affinity_mask(h->q_num, i, h 1318 drivers/net/ethernet/hisilicon/hns/hns_enet.c hns_nic_free_irq(h->q_num, priv); h 1325 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1338 drivers/net/ethernet/hisilicon/hns/hns_enet.c for (i = 0; i < h->q_num * 2; i++) { h 1344 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr); h 1348 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = h->dev->ops->start ? h->dev->ops->start(h) : 0; h 1367 drivers/net/ethernet/hisilicon/hns/hns_enet.c hns_nic_free_irq(h->q_num, priv); h 1444 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1453 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = netif_set_real_num_tx_queues(ndev, h->q_num); h 1460 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = netif_set_real_num_rx_queues(ndev, h->q_num); h 1563 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1564 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_ae_ops *ops = h->dev->ops; h 1568 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1); h 1572 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = ops->start ? 
ops->start(h) : 0; h 1577 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->phy_if != PHY_INTERFACE_MODE_XGMII) h 1583 drivers/net/ethernet/hisilicon/hns/hns_enet.c ops->adjust_link(h, speed, duplex); h 1594 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1595 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_ae_ops *ops = h->dev->ops; h 1597 drivers/net/ethernet/hisilicon/hns/hns_enet.c ops->stop(h); h 1598 drivers/net/ethernet/hisilicon/hns/hns_enet.c ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0); h 1617 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1618 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_ae_ops *ops = h->dev->ops; h 1633 drivers/net/ethernet/hisilicon/hns/hns_enet.c indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir); h 1639 drivers/net/ethernet/hisilicon/hns/hns_enet.c ops->get_rss(h, org_indir, NULL, NULL); h 1654 drivers/net/ethernet/hisilicon/hns/hns_enet.c for (i = 0; i < h->q_num; i++) { h 1655 drivers/net/ethernet/hisilicon/hns/hns_enet.c ring = &h->qs[i]->rx_ring; h 1675 drivers/net/ethernet/hisilicon/hns/hns_enet.c ops->set_rss(h, cur_indir, NULL, 0); h 1710 drivers/net/ethernet/hisilicon/hns/hns_enet.c ops->set_rss(h, org_indir, NULL, 0); h 1723 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1735 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (!h->dev->ops->set_mtu) h 1747 drivers/net/ethernet/hisilicon/hns/hns_enet.c hnae_reinit_all_ring_desc(h); h 1753 drivers/net/ethernet/hisilicon/hns/hns_enet.c hnae_reinit_all_ring_page_off(h); h 1761 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = h->dev->ops->set_mtu(h, new_mtu); h 1827 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1829 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->dev->ops->add_uc_addr) h 1830 drivers/net/ethernet/hisilicon/hns/hns_enet.c return h->dev->ops->add_uc_addr(h, addr); h 1839 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1841 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->dev->ops->rm_uc_addr) h 1842 drivers/net/ethernet/hisilicon/hns/hns_enet.c return h->dev->ops->rm_uc_addr(h, addr); h 1857 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1860 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (!h) { h 1865 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->dev->ops->clr_mc_addr) h 1866 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->dev->ops->clr_mc_addr(h)) h 1869 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->dev->ops->set_mc_addr) { h 1871 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->dev->ops->set_mc_addr(h, ha->addr)) h 1879 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1881 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->dev->ops->set_promisc_mode) { h 1883 drivers/net/ethernet/hisilicon/hns/hns_enet.c h->dev->ops->set_promisc_mode(h, 1); h 1885 drivers/net/ethernet/hisilicon/hns/hns_enet.c h->dev->ops->set_promisc_mode(h, 0); h 1903 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1905 drivers/net/ethernet/hisilicon/hns/hns_enet.c for (idx = 0; idx < h->q_num; idx++) { h 1906 drivers/net/ethernet/hisilicon/hns/hns_enet.c tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes; h 1907 drivers/net/ethernet/hisilicon/hns/hns_enet.c tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts; h 1908 
drivers/net/ethernet/hisilicon/hns/hns_enet.c rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes; h 1909 drivers/net/ethernet/hisilicon/hns/hns_enet.c rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts; h 1973 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1975 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->phy_dev) { h 1976 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->phy_if != PHY_INTERFACE_MODE_XGMII) h 1979 drivers/net/ethernet/hisilicon/hns/hns_enet.c (void)genphy_read_status(h->phy_dev); h 1987 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 1988 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_ae_ops *ops = h->dev->ops; h 2005 drivers/net/ethernet/hisilicon/hns/hns_enet.c for (i = 0; i < h->q_num; i++) { h 2007 drivers/net/ethernet/hisilicon/hns/hns_enet.c i, h->qs[i]->tx_ring.next_to_clean); h 2009 drivers/net/ethernet/hisilicon/hns/hns_enet.c i, h->qs[i]->tx_ring.next_to_use); h 2011 drivers/net/ethernet/hisilicon/hns/hns_enet.c i, h->qs[i]->rx_ring.next_to_clean); h 2013 drivers/net/ethernet/hisilicon/hns/hns_enet.c i, h->qs[i]->rx_ring.next_to_use); h 2057 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 2061 drivers/net/ethernet/hisilicon/hns/hns_enet.c h->dev->ops->update_led_status(h); h 2103 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 2108 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (h->q_num > NIC_MAX_Q_PER_VF) { h 2109 drivers/net/ethernet/hisilicon/hns/hns_enet.c netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num); h 2113 drivers/net/ethernet/hisilicon/hns/hns_enet.c priv->ring_data = kzalloc(array3_size(h->q_num, h 2119 drivers/net/ethernet/hisilicon/hns/hns_enet.c for (i = 0; i < h->q_num; i++) { h 2122 drivers/net/ethernet/hisilicon/hns/hns_enet.c rd->ring = &h->qs[i]->tx_ring; h 2131 drivers/net/ethernet/hisilicon/hns/hns_enet.c for (i = h->q_num; i < h->q_num * 2; i++) { h 2133 drivers/net/ethernet/hisilicon/hns/hns_enet.c rd->queue_index = i - h->q_num; h 2134 drivers/net/ethernet/hisilicon/hns/hns_enet.c rd->ring = &h->qs[i - h->q_num]->rx_ring; h 2150 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 2153 drivers/net/ethernet/hisilicon/hns/hns_enet.c for (i = 0; i < h->q_num * 2; i++) { h 2171 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h = priv->ae_handle; h 2192 drivers/net/ethernet/hisilicon/hns/hns_enet.c h->dev->ops->set_tso_stats(h, 1); h 2199 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *h; h 2202 drivers/net/ethernet/hisilicon/hns/hns_enet.c h = hnae_get_handle(&priv->netdev->dev, h 2204 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (IS_ERR_OR_NULL(h)) { h 2209 drivers/net/ethernet/hisilicon/hns/hns_enet.c priv->ae_handle = h; h 2211 drivers/net/ethernet/hisilicon/hns/hns_enet.c ret = hns_nic_init_phy(ndev, h); h 90 drivers/net/ethernet/hisilicon/hns/hns_enet.h int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h); h 43 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h; h 45 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h = priv->ae_handle; h 54 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (h->dev && h->dev->ops && h->dev->ops->get_status) h 55 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c link_stat = link_stat && h->dev->ops->get_status(h); h 118 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h; h 128 
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h = priv->ae_handle; h 129 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (!h->dev || !h->dev->ops || !h->dev->ops->get_info) h 132 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ret = h->dev->ops->get_info(h, NULL, &speed, &duplex); h 160 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c supported |= h->if_support; h 161 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (h->phy_if == PHY_INTERFACE_MODE_SGMII) { h 164 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c } else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) { h 169 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c switch (h->media_type) { h 181 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG)) h 205 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h; h 215 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h = priv->ae_handle; h 218 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (h->phy_if == PHY_INTERFACE_MODE_XGMII) { h 223 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c } else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) { h 241 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (h->dev->ops->adjust_link) { h 243 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); h 289 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h = priv->ae_handle; h 295 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ret = h->dev->ops->set_loopback(h, loop, 0x1); h 298 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if ((h->dev->ops->set_loopback) && h 300 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ret = h->dev->ops->set_loopback(h, loop, 0x1); h 303 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (h->dev->ops->set_loopback) h 304 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ret = h->dev->ops->set_loopback(h, loop, 0x1); h 310 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (!ret && h->dev->ops->set_loopback) { h 312 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ret = h->dev->ops->set_loopback(h, h 316 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ret = h->dev->ops->set_loopback(h, h 327 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h->dev->ops->set_promisc_mode( h 328 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h, ndev->flags & IFF_PROMISC); h 330 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h->dev->ops->set_promisc_mode(h, 1); h 340 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h = priv->ae_handle; h 352 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ret = h->dev->ops->start ? 
h->dev->ops->start(h) : 0; h 363 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h->dev->ops->adjust_link(h, speed, duplex); h 481 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h = priv->ae_handle; h 527 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h->q_num, h->q_num * 2 - 1, h 550 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h = priv->ae_handle; h 562 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (h->dev->ops->stop) h 563 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h->dev->ops->stop(h); h 566 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c (void)__lb_clean_rings(priv, 0, h->q_num - 1, 256); h 713 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h; h 716 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h = priv->ae_handle; h 717 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ops = h->dev->ops; h 837 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h = priv->ae_handle; h 841 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (!h->dev->ops->get_stats || !h->dev->ops->update_stats) { h 846 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h->dev->ops->update_stats(h, &netdev->stats); h 881 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h->dev->ops->get_stats(h, &p[26]); h 893 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h = priv->ae_handle; h 896 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c if (!h->dev->ops->get_strings) { h 969 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c h->dev->ops->get_strings(h, stringset, (u8 *)buff); h 983 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h = priv->ae_handle; h 984 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_ae_ops *ops = h->dev->ops; h 1001 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); h 1040 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c struct hnae_handle *h = priv->ae_handle; h 1091 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c return h->dev->ops->set_led_id(h, HNAE_LED_ACTIVE); h 1093 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c return h->dev->ops->set_led_id(h, HNAE_LED_ON); h 1095 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c return h->dev->ops->set_led_id(h, HNAE_LED_OFF); h 1097 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c return h->dev->ops->set_led_id(h, HNAE_LED_INACTIVE); h 496 drivers/net/ethernet/hisilicon/hns3/hnae3.h void (*get_tqps_and_rss_info)(struct hnae3_handle *h, h 9 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c struct hnae3_handle *h = hns3_get_handle(ndev); h 14 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c if (h->kinfo.dcb_ops->ieee_getets) h 15 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c return h->kinfo.dcb_ops->ieee_getets(h, ets); h 22 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c struct hnae3_handle *h = hns3_get_handle(ndev); h 27 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c if (h->kinfo.dcb_ops->ieee_setets) h 28 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c return h->kinfo.dcb_ops->ieee_setets(h, ets); h 35 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c struct hnae3_handle *h = hns3_get_handle(ndev); h 40 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c if (h->kinfo.dcb_ops->ieee_getpfc) h 41 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); h 48 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c struct hnae3_handle *h = hns3_get_handle(ndev); h 53 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c if 
(h->kinfo.dcb_ops->ieee_setpfc) h 54 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); h 62 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c struct hnae3_handle *h = hns3_get_handle(ndev); h 64 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c if (h->kinfo.dcb_ops->getdcbx) h 65 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c return h->kinfo.dcb_ops->getdcbx(h); h 73 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c struct hnae3_handle *h = hns3_get_handle(ndev); h 75 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c if (h->kinfo.dcb_ops->setdcbx) h 76 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c return h->kinfo.dcb_ops->setdcbx(h, mode); h 15 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c static int hns3_dbg_queue_info(struct hnae3_handle *h, h 18 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c struct hns3_nic_priv *priv = h->priv; h 27 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_err(&h->pdev->dev, "ring_data is NULL\n"); h 31 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c queue_max = h->kinfo.num_tqps; h 38 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "queue info\n"); h 40 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c if (queue_num >= h->kinfo.num_tqps) { h 41 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_err(&h->pdev->dev, h 43 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c h->kinfo.num_tqps - 1); h 57 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring; h 62 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i, h 67 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value); h 71 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value); h 75 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value); h 79 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value); h 83 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value); h 87 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value); h 94 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i, h 99 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value); h 103 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value); h 107 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value); h 111 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value); h 115 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value); h 119 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value); h 123 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i, h 130 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c static int hns3_dbg_queue_map(struct hnae3_handle *h) h 132 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c struct hns3_nic_priv *priv = h->priv; h 136 
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c if (!h->ae_algo->ops->get_global_queue_id) h 139 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "map info for queue id and vector id\n"); h 140 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, h 142 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c for (i = 0; i < h->kinfo.num_tqps; i++) { h 145 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c global_qid = h->ae_algo->ops->get_global_queue_id(h, i); h 151 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, h 160 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) h 162 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c struct hns3_nic_priv *priv = h->priv; h 165 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c struct device *dev = &h->pdev->dev; h 180 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c if (q_num >= h->kinfo.num_tqps) { h 182 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c h->kinfo.num_tqps - 1); h 217 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c ring = ring_data[q_num + h->kinfo.num_tqps].ring; h 238 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c static void hns3_dbg_help(struct hnae3_handle *h) h 244 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "available commands\n"); h 245 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "queue info <number>\n"); h 246 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "queue map\n"); h 247 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n"); h 249 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c if (!hns3_is_phys_func(h->pdev)) h 252 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump fd tcam\n"); h 253 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump tc\n"); h 254 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump tm map <q_num>\n"); h 255 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump tm\n"); h 256 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump qos pause cfg\n"); h 257 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump qos pri map\n"); h 258 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump qos buf cfg\n"); h 259 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump mng tbl\n"); h 260 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump reset info\n"); h 261 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump m7 info\n"); h 262 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n"); h 263 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "dump mac tnl status\n"); h 274 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "%s", printf_buf); h 281 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c dev_info(&h->pdev->dev, "%s", printf_buf); h 37 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); h 250 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 256 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 
hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); h 261 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 262 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_knic_private_info *kinfo = &h->kinfo; h 304 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static u16 hns3_get_max_available_channels(struct hnae3_handle *h) h 308 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); h 309 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c rss_size = alloc_tqps / h->kinfo.num_tc; h 369 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 373 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = hns3_nic_reset_all_ring(h); h 396 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (j = 0; j < h->kinfo.num_tqps; j++) h 397 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_tqp_enable(h->kinfo.tqp[j]); h 400 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; h 409 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_tqp_disable(h->kinfo.tqp[j]); h 446 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 465 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c kinfo = &h->kinfo; h 469 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->set_timer_task) h 470 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->set_timer_task(priv->ae_handle, true); h 474 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c netif_dbg(h, drv, netdev, "net open\n"); h 479 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_reset_tx_queue(struct hnae3_handle *h) h 481 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct net_device *ndev = h->kinfo.netdev; h 486 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (i = 0; i < h->kinfo.num_tqps; i++) { h 496 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 505 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (i = 0; i < h->kinfo.num_tqps; i++) h 506 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_tqp_disable(h->kinfo.tqp[i]); h 531 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 536 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c netif_dbg(h, drv, netdev, "net stop\n"); h 538 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->set_timer_task) h 539 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->set_timer_task(priv->ae_handle, false); h 552 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 554 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->add_uc_addr) h 555 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return h->ae_algo->ops->add_uc_addr(h, addr); h 563 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 565 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->rm_uc_addr) h 566 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return h->ae_algo->ops->rm_uc_addr(h, addr); h 574 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 576 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->add_mc_addr) h 577 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return h->ae_algo->ops->add_mc_addr(h, addr); h 585 
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 587 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->rm_mc_addr) h 588 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return h->ae_algo->ops->rm_mc_addr(h, addr); h 610 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 638 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->netdev_flags = new_flags; h 645 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 647 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->set_promisc_mode) { h 648 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return h->ae_algo->ops->set_promisc_mode(h, h 659 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 662 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) { h 663 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false; h 668 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->enable_vlan_filter(h, enable); h 1409 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 1422 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); h 1436 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 1441 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (!h->ae_algo->ops->do_ioctl) h 1444 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return h->ae_algo->ops->do_ioctl(h, ifr, cmd); h 1452 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 1456 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { h 1458 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->set_gro_en(h, enable); h 1464 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->enable_vlan_filter) { h 1466 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->enable_vlan_filter(h, enable); h 1470 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->enable_hw_strip_rxvtag) { h 1472 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); h 1477 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { h 1479 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->enable_fd(h, enable); h 1581 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h; h 1593 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h = hns3_get_handle(netdev); h 1594 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c kinfo = &h->kinfo; h 1596 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); h 1599 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c kinfo->dcb_ops->setup_tc(h, tc ? 
tc : 1, prio_tc) : -EOPNOTSUPP; h 1614 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 1617 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->set_vlan_filter) h 1618 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); h 1626 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 1629 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->set_vlan_filter) h 1630 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); h 1638 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 1641 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c netif_dbg(h, drv, netdev, h 1645 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->set_vf_vlan_filter) h 1646 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, h 1654 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 1660 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (!h->ae_algo->ops->set_mtu) h 1663 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c netif_dbg(h, drv, netdev, h 1666 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->set_mtu(h, new_mtu); h 1679 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(ndev); h 1738 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->get_mac_stats) { h 1741 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->get_mac_stats(h, &mac_stats); h 1779 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 1787 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->reset_event) h 1788 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->reset_event(h->pdev, h); h 1795 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(dev); h 1798 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (!h->ae_algo->ops->add_arfs_entry) h 1813 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); h 2072 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 2073 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct pci_dev *pdev = h->pdev; h 2113 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (!(h->flags & HNAE3_SUPPORT_VF)) { h 2319 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) h 2324 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (unlikely(h > ring->desc_num)) h 2327 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return u > c ? 
(h > c && h <= u) : (h > c || h <= u); h 3316 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 3329 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (i = 0; i < h->kinfo.num_tqps; i++) { h 3331 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c u16 tqp_num = h->kinfo.num_tqps; h 3353 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c tqp_vector->handle = h; h 3360 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->map_ring_to_vector(h, h 3385 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 3388 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct pci_dev *pdev = h->pdev; h 3389 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c u16 tqp_num = h->kinfo.num_tqps; h 3405 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); h 3438 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 3450 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->unmap_ring_from_vector(h, h 3469 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 3470 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct pci_dev *pdev = h->pdev; h 3477 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); h 3546 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 3547 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct pci_dev *pdev = h->pdev; h 3551 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c array3_size(h->kinfo.num_tqps, h 3558 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (i = 0; i < h->kinfo.num_tqps; i++) { h 3559 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); h 3569 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c priv->ring_data[i + h->kinfo.num_tqps].ring); h 3579 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 3585 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (i = 0; i < h->kinfo.num_tqps; i++) { h 3588 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c priv->ring_data[i + h->kinfo.num_tqps].ring); h 3717 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 3718 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c int ring_num = h->kinfo.num_tqps * 2; h 3744 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 3747 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (i = 0; i < h->kinfo.num_tqps; i++) { h 3749 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); h 3758 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = priv->ae_handle; h 3762 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->get_mac_addr && init) { h 3763 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); h 3774 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->set_mac_addr) h 3775 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); h 3782 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 3785 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->mac_connect_phy) h 3786 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = 
h->ae_algo->ops->mac_connect_phy(h); h 3793 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 3795 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->mac_disconnect_phy) h 3796 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->mac_disconnect_phy(h); h 3801 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 3804 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->restore_fd_rules) h 3805 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->restore_fd_rules(h); h 3812 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 3814 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (h->ae_algo->ops->del_all_fd_entries) h 3815 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c h->ae_algo->ops->del_all_fd_entries(h, clear_list); h 4151 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) h 4153 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct net_device *ndev = h->kinfo.netdev; h 4157 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (i = 0; i < h->kinfo.num_tqps; i++) { h 4163 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring = priv->ring_data[i + h->kinfo.num_tqps].ring; h 4174 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c int hns3_nic_reset_all_ring(struct hnae3_handle *h) h 4176 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct net_device *ndev = h->kinfo.netdev; h 4182 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c for (i = 0; i < h->kinfo.num_tqps; i++) { h 4183 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = h->ae_algo->ops->reset_queue(h, i); h 4196 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring; h 4450 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_handle *h = hns3_get_handle(netdev); h 4451 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_knic_private_info *kinfo = &h->kinfo; h 4463 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (new_tqp_num > hns3_get_max_available_channels(h) || h 4467 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_get_max_available_channels(h)); h 4474 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c netif_dbg(h, drv, netdev, h 4478 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); h 4482 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); h 4486 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c org_tqp_num = h->kinfo.num_tqps; h 4487 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); h 4493 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); h 654 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h int hns3_nic_reset_all_ring(struct hnae3_handle *h); h 75 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(ndev); h 79 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->set_loopback || h 80 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c !h->ae_algo->ops->set_promisc_mode) h 88 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ret = h->ae_algo->ops->set_loopback(h, loop, en); h 95 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (ret || h->pdev->revision >= 0x21) h 99 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 
h->ae_algo->ops->set_promisc_mode(h, true, true); h 102 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c hns3_update_promisc_mode(ndev, h->netdev_flags); h 112 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(ndev); h 115 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ret = hns3_nic_reset_all_ring(h); h 195 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = priv->ae_handle; h 199 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c kinfo = &h->kinfo; h 301 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = priv->ae_handle; h 319 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c netif_dbg(h, drv, ndev, "self test start"); h 323 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->flags & HNAE3_SUPPORT_APP_LOOPBACK; h 327 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; h 332 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; h 336 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->flags & HNAE3_SUPPORT_PHY_LOOPBACK; h 344 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->enable_vlan_filter; h 346 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->enable_vlan_filter(h, false); h 353 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->halt_autoneg) h 354 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->halt_autoneg(h, true); h 378 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->halt_autoneg) h 379 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->halt_autoneg(h, false); h 383 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->enable_vlan_filter(h, true); h 389 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c netif_dbg(h, drv, ndev, "self test end\n"); h 394 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 395 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c const struct hnae3_ae_ops *ops = h->ae_algo->ops; h 402 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + h 403 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ops->get_sset_count(h, stringset)); h 406 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return ops->get_sset_count(h, stringset); h 459 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 460 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c const struct hnae3_ae_ops *ops = h->ae_algo->ops; h 468 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c buff = hns3_get_strings_tqps(h, buff); h 469 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ops->get_strings(h, stringset, (u8 *)buff); h 472 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ops->get_strings(h, stringset, data); h 516 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 524 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { h 529 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->update_stats(h, &netdev->stats); h 532 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c p = hns3_get_stats_tqps(h, p); h 535 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->get_stats(h, p); h 542 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = priv->ae_handle; 
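The hns3_ethtool.c and hns3_enet.c entries above are dominated by a single idiom: every operation is reached through h->ae_algo->ops and is invoked only after a NULL check, so a backend that leaves a slot unfilled degrades to a sane default instead of oopsing. Below is a minimal, self-contained sketch of that guarded-dispatch pattern; the demo_* names are illustrative stand-ins, not the real hnae3 structures.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical ops table: a backend fills in only what it supports. */
struct demo_ops {
	int (*get_status)(void *priv);
	int (*set_led_id)(void *priv, int state);
};

struct demo_handle {
	const struct demo_ops *ops;
	void *priv;
};

/* Guarded dispatch: an absent op falls back to a default, never a crash. */
static int demo_get_status(struct demo_handle *h)
{
	if (h->ops && h->ops->get_status)
		return h->ops->get_status(h->priv);
	return 0;	/* default: report link down */
}

static int always_up(void *priv)
{
	(void)priv;
	return 1;
}

static const struct demo_ops partial_backend = {
	.get_status = always_up,
	/* .set_led_id deliberately left NULL */
};

int main(void)
{
	struct demo_handle h = { .ops = &partial_backend, .priv = NULL };

	printf("status: %d\n", demo_get_status(&h));	/* prints 1 */
	return 0;
}

The same shape recurs throughout the listing, for instance in the get_status, get_pauseparam, get_regs and set_led_id entries that follow.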
h 545 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->get_fw_version) { h 554 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c strncpy(drvinfo->driver, h->pdev->driver->name, h 558 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c strncpy(drvinfo->bus_info, pci_name(h->pdev), h 562 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h); h 578 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 580 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->get_status) h 581 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->get_status(h); h 590 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = priv->ae_handle; h 591 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c int queue_num = h->kinfo.num_tqps; h 608 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 610 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->get_pauseparam) h 611 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->get_pauseparam(h, &param->autoneg, h 618 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 620 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c netif_dbg(h, drv, netdev, h 624 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->set_pauseparam) h 625 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->set_pauseparam(h, param->autoneg, h 631 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c static void hns3_get_ksettings(struct hnae3_handle *h, h 634 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c const struct hnae3_ae_ops *ops = h->ae_algo->ops; h 638 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ops->get_ksettings_an_result(h, h 645 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ops->get_link_mode(h, h 651 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, h 658 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 664 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ops = h->ae_algo->ops; h 666 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c ops->get_media_type(h, &media_type, &module_type); h 673 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c hns3_get_ksettings(h, cmd); h 681 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c hns3_get_ksettings(h, cmd); h 685 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c hns3_get_ksettings(h, cmd); h 690 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c hns3_get_ksettings(h, cmd); h 809 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 811 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->get_rss_key_size) h 814 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->get_rss_key_size(h); h 819 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 821 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->get_rss_indir_size) h 824 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->get_rss_indir_size(h); h 830 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 832 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->get_rss)
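The get_rss_key_size, get_rss_indir_size and get_rss hooks listed just above expose an RSS indirection table: the NIC hashes each incoming flow, and the low bits of that hash index a table whose entries name receive queues. The sketch below shows how such a table spreads flows across queues; it assumes a simple round-robin fill, and the demo_* names and sizes are illustrative, not the driver's.

#include <stdio.h>
#include <stdint.h>

#define DEMO_INDIR_SIZE	8	/* real tables are larger, e.g. 128 or 512 */
#define DEMO_NUM_QUEUES	4

/* Indirection table: hash slot -> receive queue. */
static uint8_t demo_indir[DEMO_INDIR_SIZE];

/* Round-robin fill: spreads the slots evenly over the active queues. */
static void demo_fill_indir(unsigned int nqueues)
{
	unsigned int i;

	for (i = 0; i < DEMO_INDIR_SIZE; i++)
		demo_indir[i] = i % nqueues;
}

/* Queue selection: the flow hash picks a slot, the slot names the queue. */
static unsigned int demo_pick_queue(uint32_t flow_hash)
{
	return demo_indir[flow_hash % DEMO_INDIR_SIZE];
}

int main(void)
{
	uint32_t hash;

	demo_fill_indir(DEMO_NUM_QUEUES);
	for (hash = 0; hash < 10; hash++)
		printf("hash %2u -> queue %u\n", (unsigned int)hash,
		       demo_pick_queue(hash));
	return 0;
}

Rewriting the table is all it takes to re-weight traffic, which is why the set_rss entry nearby accepts a new indir array without touching the queues themselves.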
h 835 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->get_rss(h, indir, key, hfunc); h 841 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 843 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->set_rss) h 846 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if ((h->pdev->revision == 0x20 && h 859 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->set_rss(h, indir, key, hfunc); h 866 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 870 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c cmd->data = h->kinfo.num_tqps; h 873 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->get_rss_tuple) h 874 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->get_rss_tuple(h, cmd); h 877 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->get_fd_rule_cnt) h 878 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->get_fd_rule_cnt(h, cmd); h 881 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->get_fd_rule_info) h 882 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->get_fd_rule_info(h, cmd); h 885 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->get_fd_all_rules) h 886 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->get_fd_all_rules(h, cmd, h 897 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = priv->ae_handle; h 900 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->kinfo.num_tx_desc = tx_desc_num; h 901 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->kinfo.num_rx_desc = rx_desc_num; h 903 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c for (i = 0; i < h->kinfo.num_tqps; i++) { h 905 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num = h 955 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = priv->ae_handle; h 960 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c u16 queue_num = h->kinfo.num_tqps; h 999 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c for (i = 0; i < h->kinfo.num_tqps * 2; i++) h 1003 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c for (i = 0; i < h->kinfo.num_tqps * 2; i++) h 1017 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 1021 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->set_rss_tuple) h 1022 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->set_rss_tuple(h, cmd); h 1025 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->add_fd_entry) h 1026 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->add_fd_entry(h, cmd); h 1029 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->del_fd_entry) h 1030 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->del_fd_entry(h, cmd); h 1077 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev); h 1079 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->ae_algo->ops->get_channels) h 1080 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->get_channels(h, ch); h 1088 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = priv->ae_handle; h 1089 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 
h 1112 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
h 1113 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
h 1224 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = priv->ae_handle;
h 1225 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c int queue_num = h->kinfo.num_tqps;
h 1243 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
h 1244 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
h 1250 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev);
h 1251 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c u16 queue_num = h->kinfo.num_tqps;
h 1262 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->kinfo.int_rl_setting =
h 1273 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev);
h 1275 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->get_regs_len)
h 1278 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->get_regs_len(h);
h 1284 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev);
h 1286 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->get_regs)
h 1289 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->ae_algo->ops->get_regs(h, &cmd->version, data);
h 1295 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev);
h 1297 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (!h->ae_algo->ops->set_led_id)
h 1300 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->ae_algo->ops->set_led_id(h, state);
h 1305 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev);
h 1307 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c return h->msg_enable;
h 1312 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev);
h 1314 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c h->msg_enable = msg_level;
h 1455 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c struct hnae3_handle *h = hns3_get_handle(netdev);
h 1457 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c if (h->flags & HNAE3_SUPPORT_VF)
h 65 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
h 67 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct hclge_vport *vport = hclge_get_vport(h);
h 223 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
h 225 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct hclge_vport *vport = hclge_get_vport(h);
h 226 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct net_device *netdev = h->kinfo.netdev;
h 241 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c netif_dbg(h, drv, netdev, "set ets\n");
h 279 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
h 282 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct hclge_vport *vport = hclge_get_vport(h);
h 315 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
h 317 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct hclge_vport *vport = hclge_get_vport(h);
h 318 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct net_device *netdev = h->kinfo.netdev;
h 345 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c netif_dbg(h, drv, netdev,
h 369 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c static u8 hclge_getdcbx(struct hnae3_handle *h)
h 371 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct hclge_vport *vport = hclge_get_vport(h);
h 380 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
h 382 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct hclge_vport *vport = hclge_get_vport(h);
h 383 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct net_device *netdev = h->kinfo.netdev;
h 386 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
h 400 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
h 402 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c struct hclge_vport *vport = hclge_get_vport(h);
h 177 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c struct hnae3_handle *h = *((void **)netdev_priv(netdev));
h 178 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c struct hclge_vport *vport = hclge_get_vport(h);
h 173 drivers/net/ethernet/ibm/emac/emac.h #define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16))
h 174 drivers/net/ethernet/ibm/emac/emac.h #define EMAC4_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14))
h 1234 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 1262 drivers/net/ethernet/intel/i40e/i40e_main.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
h 1473 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 1483 drivers/net/ethernet/intel/i40e/i40e_main.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
h 1507 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 1512 drivers/net/ethernet/intel/i40e/i40e_main.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
h 1969 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 1971 drivers/net/ethernet/intel/i40e/i40e_main.c hlist_for_each_entry_safe(f, h, from, hlist) {
h 1992 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 1994 drivers/net/ethernet/intel/i40e/i40e_main.c hlist_for_each_entry_safe(new, h, from, hlist) {
h 2272 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 2309 drivers/net/ethernet/intel/i40e/i40e_main.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
h 2360 drivers/net/ethernet/intel/i40e/i40e_main.c hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
h 2422 drivers/net/ethernet/intel/i40e/i40e_main.c hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
h 2471 drivers/net/ethernet/intel/i40e/i40e_main.c hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
h 2764 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 2767 drivers/net/ethernet/intel/i40e/i40e_main.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
h 2835 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 2838 drivers/net/ethernet/intel/i40e/i40e_main.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
h 13085 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 13292 drivers/net/ethernet/intel/i40e/i40e_main.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
h 13327 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_node *h;
h 13370 drivers/net/ethernet/intel/i40e/i40e_main.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
h 40 drivers/net/ethernet/intel/i40e/i40e_osdep.h #define i40e_allocate_dma_mem(h, m, unused, s, a) \
h 41 drivers/net/ethernet/intel/i40e/i40e_osdep.h i40e_allocate_dma_mem_d(h, m, s, a)
h 42 drivers/net/ethernet/intel/i40e/i40e_osdep.h #define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
h 49 drivers/net/ethernet/intel/i40e/i40e_osdep.h #define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
h 50 drivers/net/ethernet/intel/i40e/i40e_osdep.h #define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
h 52 drivers/net/ethernet/intel/i40e/i40e_osdep.h #define i40e_debug(h, m, s, ...) \
h 54 drivers/net/ethernet/intel/i40e/i40e_osdep.h if (((m) & (h)->debug_mask)) \
h 56 drivers/net/ethernet/intel/i40e/i40e_osdep.h (h)->bus.bus_id, (h)->bus.device, \
h 57 drivers/net/ethernet/intel/i40e/i40e_osdep.h (h)->bus.func, ##__VA_ARGS__); \
h 3133 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c struct hlist_node *h;
h 3173 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
h 3964 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c struct hlist_node *h;
h 4021 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
h 36 drivers/net/ethernet/intel/iavf/iavf_osdep.h #define iavf_allocate_dma_mem(h, m, unused, s, a) \
h 37 drivers/net/ethernet/intel/iavf/iavf_osdep.h iavf_allocate_dma_mem_d(h, m, s, a)
h 38 drivers/net/ethernet/intel/iavf/iavf_osdep.h #define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m)
h 44 drivers/net/ethernet/intel/iavf/iavf_osdep.h #define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)
h 45 drivers/net/ethernet/intel/iavf/iavf_osdep.h #define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)
h 47 drivers/net/ethernet/intel/iavf/iavf_osdep.h #define iavf_debug(h, m, s, ...) \
h 49 drivers/net/ethernet/intel/iavf/iavf_osdep.h if (((m) & (h)->debug_mask)) \
h 51 drivers/net/ethernet/intel/iavf/iavf_osdep.h (h)->bus.bus_id, (h)->bus.device, \
h 52 drivers/net/ethernet/intel/iavf/iavf_osdep.h (h)->bus.func, ##__VA_ARGS__); \
h 1583 drivers/net/ethernet/intel/ice/ice_lib.c void ice_free_fltr_list(struct device *dev, struct list_head *h)
h 1587 drivers/net/ethernet/intel/ice/ice_lib.c list_for_each_entry_safe(e, tmp, h, list_entry) {
h 26 drivers/net/ethernet/intel/ice/ice_lib.h void ice_free_fltr_list(struct device *dev, struct list_head *h);
h 417 drivers/net/ethernet/intel/igb/e1000_regs.h #define E1000_REMOVED(h) unlikely(!(h))
h 7574 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 h, t;
h 7576 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
h 7579 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (h != t)
h 169 drivers/net/ethernet/intel/ixgbevf/vf.h #define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
h 172 drivers/net/ethernet/intel/ixgbevf/vf.h #define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r)
h 180 drivers/net/ethernet/intel/ixgbevf/vf.h #define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
h 188 drivers/net/ethernet/intel/ixgbevf/vf.h #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
h 314 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c static inline void bitmap_iterator_init(struct bitmap_iterator *h,
h 318 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c h->iterator = 0;
h 319 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c h->advance_array = !bitmap_empty(stats_bitmap, count);
h 320 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
h 322 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c h->stats_bitmap = stats_bitmap;
h 325 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c static inline int bitmap_iterator_test(struct bitmap_iterator *h)
h 327 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
h 330 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
h 332 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c return h->iterator++;
h 336 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c bitmap_iterator_count(struct bitmap_iterator *h)
h 338 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c return h->count;
h 96 drivers/net/ethernet/mellanox/mlx5/core/health.c struct health_buffer __iomem *h = health->health;
h 99 drivers/net/ethernet/mellanox/mlx5/core/health.c return (ioread32be(&h->fw_ver) == 0xffffffff);
h 105 drivers/net/ethernet/mellanox/mlx5/core/health.c struct health_buffer __iomem *h = health->health;
h 106 drivers/net/ethernet/mellanox/mlx5/core/health.c u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
h 107 drivers/net/ethernet/mellanox/mlx5/core/health.c u8 synd = ioread8(&h->synd);
h 371 drivers/net/ethernet/mellanox/mlx5/core/health.c struct health_buffer __iomem *h = health->health;
h 377 drivers/net/ethernet/mellanox/mlx5/core/health.c if (!ioread8(&h->synd))
h 380 drivers/net/ethernet/mellanox/mlx5/core/health.c for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
h 382 drivers/net/ethernet/mellanox/mlx5/core/health.c ioread32be(h->assert_var + i));
h 385 drivers/net/ethernet/mellanox/mlx5/core/health.c ioread32be(&h->assert_exit_ptr));
h 387 drivers/net/ethernet/mellanox/mlx5/core/health.c ioread32be(&h->assert_callra));
h 390 drivers/net/ethernet/mellanox/mlx5/core/health.c mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
h 391 drivers/net/ethernet/mellanox/mlx5/core/health.c mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
h 392 drivers/net/ethernet/mellanox/mlx5/core/health.c mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
h 393 drivers/net/ethernet/mellanox/mlx5/core/health.c hsynd_str(ioread8(&h->synd)));
h 394 drivers/net/ethernet/mellanox/mlx5/core/health.c mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
h 395 drivers/net/ethernet/mellanox/mlx5/core/health.c fw = ioread32be(&h->fw_ver);
h 405 drivers/net/ethernet/mellanox/mlx5/core/health.c struct health_buffer __iomem *h = health->health;
h 409 drivers/net/ethernet/mellanox/mlx5/core/health.c synd = ioread8(&h->synd);
h 443 drivers/net/ethernet/mellanox/mlx5/core/health.c struct health_buffer __iomem *h = health->health;
h 447 drivers/net/ethernet/mellanox/mlx5/core/health.c if (!ioread8(&h->synd))
h 460 drivers/net/ethernet/mellanox/mlx5/core/health.c for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) {
h 461 drivers/net/ethernet/mellanox/mlx5/core/health.c err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
h 469 drivers/net/ethernet/mellanox/mlx5/core/health.c ioread32be(&h->assert_exit_ptr));
h 473 drivers/net/ethernet/mellanox/mlx5/core/health.c ioread32be(&h->assert_callra));
h 476 drivers/net/ethernet/mellanox/mlx5/core/health.c err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
h 480 drivers/net/ethernet/mellanox/mlx5/core/health.c ioread8(&h->irisc_index));
h 483 drivers/net/ethernet/mellanox/mlx5/core/health.c err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
h 487 drivers/net/ethernet/mellanox/mlx5/core/health.c ioread16be(&h->ext_synd));
h 491 drivers/net/ethernet/mellanox/mlx5/core/health.c ioread32be(&h->fw_ver));
h 704 drivers/net/ethernet/mellanox/mlx5/core/health.c struct health_buffer __iomem *h = health->health;
h 736 drivers/net/ethernet/mellanox/mlx5/core/health.c health->synd = ioread8(&h->synd);
h 74 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)];
h 77 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c hlist_for_each_entry(node, h, hlist) {
h 562 drivers/net/ethernet/neterion/s2io.h struct RxD_t h;
h 573 drivers/net/ethernet/neterion/s2io.h struct RxD_t h;
h 1359 drivers/net/ethernet/qlogic/netxen/netxen_nic.h struct netxen_common_entry_hdr h;
h 1405 drivers/net/ethernet/qlogic/netxen/netxen_nic.h struct netxen_common_entry_hdr h;
h 1433 drivers/net/ethernet/qlogic/netxen/netxen_nic.h struct netxen_common_entry_hdr h;
h 1481 drivers/net/ethernet/qlogic/netxen/netxen_nic.h struct netxen_common_entry_hdr h;
h 1514 drivers/net/ethernet/qlogic/netxen/netxen_nic.h struct netxen_common_entry_hdr h;
h 1537 drivers/net/ethernet/qlogic/netxen/netxen_nic.h struct netxen_common_entry_hdr h;
h 1555 drivers/net/ethernet/qlogic/netxen/netxen_nic.h struct netxen_common_entry_hdr h;
h 410 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
h 415 drivers/net/ethernet/qlogic/qede/qede_filter.c hlist_for_each_entry(tpos, h, node)
h 3281 drivers/net/ethernet/sun/niu.c unsigned int h = niu_hash_rxaddr(rp, addr);
h 3285 drivers/net/ethernet/sun/niu.c pp = &rp->rxhash[h];
h 3300 drivers/net/ethernet/sun/niu.c unsigned int h = niu_hash_rxaddr(rp, base);
h 3303 drivers/net/ethernet/sun/niu.c page->mapping = (struct address_space *) rp->rxhash[h];
h 3304 drivers/net/ethernet/sun/niu.c rp->rxhash[h] = page;
h 570 drivers/net/geneve.c int h;
h 584 drivers/net/geneve.c for (h = 0; h < VNI_HASH_SIZE; ++h)
h 585 drivers/net/geneve.c INIT_HLIST_HEAD(&gs->vni_list[h]);
h 297 drivers/net/hamradio/dmascc.c int h, i, j, n;
h 313 drivers/net/hamradio/dmascc.c for (h = 0; h < NUM_TYPES; h++) {
h 317 drivers/net/hamradio/dmascc.c for (i = 0; i < hw[h].num_devs; i++)
h 321 drivers/net/hamradio/dmascc.c hw[h].io_region) / hw[h].io_delta;
h 322 drivers/net/hamradio/dmascc.c if (j >= 0 && j < hw[h].num_devs &&
h 323 drivers/net/hamradio/dmascc.c hw[h].io_region +
h 324 drivers/net/hamradio/dmascc.c j * hw[h].io_delta == io[i]) {
h 330 drivers/net/hamradio/dmascc.c for (i = 0; i < hw[h].num_devs; i++) {
h 332 drivers/net/hamradio/dmascc.c hw[h].io_region + i * hw[h].io_delta;
h 337 drivers/net/hamradio/dmascc.c for (i = 0; i < hw[h].num_devs; i++)
h 340 drivers/net/hamradio/dmascc.c (base[i], hw[h].io_size, "dmascc"))
h 344 drivers/net/hamradio/dmascc.c base[i] + hw[h].tmr_offset +
h 347 drivers/net/hamradio/dmascc.c base[i] + hw[h].tmr_offset +
h 350 drivers/net/hamradio/dmascc.c base[i] + hw[h].tmr_offset +
h 356 drivers/net/hamradio/dmascc.c for (i = 0; i < hw[h].num_devs; i++)
h 360 drivers/net/hamradio/dmascc.c outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
h 362 drivers/net/hamradio/dmascc.c outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
h 380 drivers/net/hamradio/dmascc.c for (i = 0; i < hw[h].num_devs; i++)
h 395 drivers/net/hamradio/dmascc.c for (i = 0; i < hw[h].num_devs; i++)
h 399 drivers/net/hamradio/dmascc.c (setup_adapter(base[i], h, n) == 0))
h 403 drivers/net/hamradio/dmascc.c hw[h].io_size);
h 447 drivers/net/macsec.c static void macsec_fill_sectag(struct macsec_eth_header *h,
h 453 drivers/net/macsec.c memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
h 454 drivers/net/macsec.c h->eth.h_proto = htons(ETH_P_MACSEC);
h 457 drivers/net/macsec.c h->tci_an |= MACSEC_TCI_SC;
h 458 drivers/net/macsec.c memcpy(&h->secure_channel_id, &secy->sci,
h 459 drivers/net/macsec.c sizeof(h->secure_channel_id));
h 462 drivers/net/macsec.c h->tci_an |= MACSEC_TCI_ES;
h 464 drivers/net/macsec.c h->tci_an |= MACSEC_TCI_SCB;
h 467 drivers/net/macsec.c h->packet_number = htonl(pn);
h 471 drivers/net/macsec.c h->tci_an |= MACSEC_TCI_CONFID;
h 473 drivers/net/macsec.c h->tci_an |= MACSEC_TCI_C;
h 475 drivers/net/macsec.c h->tci_an |= tx_sc->encoding_sa;
h 478 drivers/net/macsec.c static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
h 481 drivers/net/macsec.c h->short_length = data_len;
h 487 drivers/net/macsec.c struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
h 489 drivers/net/macsec.c int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
h 498 drivers/net/macsec.c if (h->tci_an & MACSEC_TCI_VERSION)
h 502 drivers/net/macsec.c if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
h 503 drivers/net/macsec.c (h->tci_an & MACSEC_TCI_SC))
h 507 drivers/net/macsec.c if (h->unused)
h 511 drivers/net/macsec.c if (!h->packet_number)
h 515 drivers/net/macsec.c if (h->short_length)
h 516 drivers/net/macsec.c return len == extra_len + h->short_length;
h 139 drivers/net/macvlan.c struct hlist_head *h = &vlan->port->vlan_source_hash[idx];
h 141 drivers/net/macvlan.c hlist_for_each_entry_rcu(entry, h, hlist) {
h 154 drivers/net/macvlan.c struct hlist_head *h;
h 166 drivers/net/macvlan.c h = &port->vlan_source_hash[macvlan_eth_hash(addr)];
h 167 drivers/net/macvlan.c hlist_add_head_rcu(&entry->hlist, h);
h 383 drivers/net/macvlan.c struct hlist_node *h, *n;
h 385 drivers/net/macvlan.c hlist_for_each_safe(h, n, &port->vlan_source_hash[i]) {
h 388 drivers/net/macvlan.c entry = hlist_entry(h, struct macvlan_source_entry,
h 429 drivers/net/macvlan.c struct hlist_head *h = &port->vlan_source_hash[idx];
h 431 drivers/net/macvlan.c hlist_for_each_entry_rcu(entry, h, hlist) {
h 1585 drivers/net/macvlan.c struct hlist_head *h = &vlan->port->vlan_source_hash[i];
h 1588 drivers/net/macvlan.c hlist_for_each_entry_rcu(entry, h, hlist) {
h 575 drivers/net/phy/dp83640.c struct ethhdr *h = eth_hdr(skb);
h 578 drivers/net/phy/dp83640.c !memcmp(h->h_source, status_frame_src, sizeof(status_frame_src)))
h 195 drivers/net/plip/plip.c unsigned short h;
h 624 drivers/net/plip/plip.c if (rcv->length.h > dev->mtu + dev->hard_header_len ||
h 625 drivers/net/plip/plip.c rcv->length.h < 8) {
h 626 drivers/net/plip/plip.c printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
h 630 drivers/net/plip/plip.c rcv->skb = dev_alloc_skb(rcv->length.h + 2);
h 636 drivers/net/plip/plip.c skb_put(rcv->skb,rcv->length.h);
h 649 drivers/net/plip/plip.c } while (++rcv->byte < rcv->length.h);
h 673 drivers/net/plip/plip.c dev->stats.rx_bytes += rcv->length.h;
h 833 drivers/net/plip/plip.c } while (++snd->byte < snd->length.h);
h 990 drivers/net/plip/plip.c snd->length.h = skb->len;
h 591 drivers/net/vxlan.c unsigned int h;
h 598 drivers/net/vxlan.c for (h = 0; h < FDB_HASH_SIZE; ++h) {
h 599 drivers/net/vxlan.c spin_lock_bh(&vxlan->hash_lock[h]);
h 600 drivers/net/vxlan.c hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
h 611 drivers/net/vxlan.c spin_unlock_bh(&vxlan->hash_lock[h]);
h 616 drivers/net/vxlan.c spin_unlock_bh(&vxlan->hash_lock[h]);
h 626 drivers/net/vxlan.c unsigned int h;
h 632 drivers/net/vxlan.c for (h = 0; h < FDB_HASH_SIZE; ++h) {
h 633 drivers/net/vxlan.c spin_lock_bh(&vxlan->hash_lock[h]);
h 634 drivers/net/vxlan.c hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
h 638 drivers/net/vxlan.c spin_unlock_bh(&vxlan->hash_lock[h]);
h 1222 drivers/net/vxlan.c unsigned int h;
h 1225 drivers/net/vxlan.c for (h = 0; h < FDB_HASH_SIZE; ++h) {
h 1228 drivers/net/vxlan.c hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
h 2723 drivers/net/vxlan.c unsigned int h;
h 2728 drivers/net/vxlan.c for (h = 0; h < FDB_HASH_SIZE; ++h) {
h 2731 drivers/net/vxlan.c spin_lock(&vxlan->hash_lock[h]);
h 2732 drivers/net/vxlan.c hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
h 2753 drivers/net/vxlan.c spin_unlock(&vxlan->hash_lock[h]);
h 2854 drivers/net/vxlan.c unsigned int h;
h 2856 drivers/net/vxlan.c for (h = 0; h < FDB_HASH_SIZE; ++h) {
h 2859 drivers/net/vxlan.c spin_lock_bh(&vxlan->hash_lock[h]);
h 2860 drivers/net/vxlan.c hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
h 2869 drivers/net/vxlan.c spin_unlock_bh(&vxlan->hash_lock[h]);
h 3030 drivers/net/vxlan.c unsigned int h;
h 3059 drivers/net/vxlan.c for (h = 0; h < FDB_HASH_SIZE; ++h) {
h 3060 drivers/net/vxlan.c spin_lock_init(&vxlan->hash_lock[h]);
h 3061 drivers/net/vxlan.c INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
h 3230 drivers/net/vxlan.c unsigned int h;
h 3237 drivers/net/vxlan.c for (h = 0; h < VNI_HASH_SIZE; ++h)
h 3238 drivers/net/vxlan.c INIT_HLIST_HEAD(&vs->vni_list[h]);
h 4400 drivers/net/vxlan.c unsigned int h;
h 4405 drivers/net/vxlan.c for (h = 0; h < PORT_HASH_SIZE; ++h)
h 4406 drivers/net/vxlan.c INIT_HLIST_HEAD(&vn->sock_list[h]);
h 4416 drivers/net/vxlan.c unsigned int h;
h 4430 drivers/net/vxlan.c for (h = 0; h < PORT_HASH_SIZE; ++h)
h 4431 drivers/net/vxlan.c WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
h 93 drivers/net/wireless/ath/ath9k/calib.c struct ath9k_nfcal_hist *h;
h 98 drivers/net/wireless/ath/ath9k/calib.c h = cal->nfCalHist;
h 106 drivers/net/wireless/ath/ath9k/calib.c h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
h 108 drivers/net/wireless/ath/ath9k/calib.c if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
h 109 drivers/net/wireless/ath/ath9k/calib.c h[i].currIndex = 0;
h 111 drivers/net/wireless/ath/ath9k/calib.c if (h[i].invalidNFcount > 0) {
h 112 drivers/net/wireless/ath/ath9k/calib.c h[i].invalidNFcount--;
h 113 drivers/net/wireless/ath/ath9k/calib.c h[i].privNF = nfarray[i];
h 115 drivers/net/wireless/ath/ath9k/calib.c h[i].privNF =
h 116 drivers/net/wireless/ath/ath9k/calib.c ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
h 119 drivers/net/wireless/ath/ath9k/calib.c if (!h[i].privNF)
h 122 drivers/net/wireless/ath/ath9k/calib.c if (h[i].privNF > limit->max) {
h 127 drivers/net/wireless/ath/ath9k/calib.c i, h[i].privNF, limit->max,
h 140 drivers/net/wireless/ath/ath9k/calib.c h[i].privNF = limit->max;
h 245 drivers/net/wireless/ath/ath9k/calib.c struct ath9k_nfcal_hist *h = NULL;
h 253 drivers/net/wireless/ath/ath9k/calib.c h = ah->caldata->nfCalHist;
h 265 drivers/net/wireless/ath/ath9k/calib.c else if (h)
h 266 drivers/net/wireless/ath/ath9k/calib.c nfval = h[i].privNF;
h 405 drivers/net/wireless/ath/ath9k/calib.c struct ath9k_nfcal_hist *h;
h 430 drivers/net/wireless/ath/ath9k/calib.c h = caldata->nfCalHist;
h 433 drivers/net/wireless/ath/ath9k/calib.c chan->noisefloor = h[0].privNF;
h 442 drivers/net/wireless/ath/ath9k/calib.c struct ath9k_nfcal_hist *h;
h 447 drivers/net/wireless/ath/ath9k/calib.c h = ah->caldata->nfCalHist;
h 449 drivers/net/wireless/ath/ath9k/calib.c h[i].currIndex = 0;
h 450 drivers/net/wireless/ath/ath9k/calib.c h[i].privNF = ath9k_hw_get_default_nf(ah, chan, k);
h 451 drivers/net/wireless/ath/ath9k/calib.c h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
h 453 drivers/net/wireless/ath/ath9k/calib.c h[i].nfCalBuffer[j] = h[i].privNF;
h 969 drivers/net/wireless/ath/ath9k/debug.c struct ath9k_nfcal_hist *h = sc->cur_chan->caldata.nfCalHist;
h 983 drivers/net/wireless/ath/ath9k/debug.c nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
h 984 drivers/net/wireless/ath/ath9k/debug.c seq_printf(file, " %d\t %d\t %d\t\t", i, h[i].privNF, nread);
h 986 drivers/net/wireless/ath/ath9k/debug.c seq_printf(file, " %d", h[i].nfCalBuffer[j]);
h 1471 drivers/net/wireless/broadcom/b43legacy/phy.c u16 h;
h 1541 drivers/net/wireless/broadcom/b43legacy/phy.c for (h = 0; h < 10; h++) {
h 1543 drivers/net/wireless/broadcom/b43legacy/phy.c i = pairorder[h];
h 258 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c struct brcmf_proto_bcdc_header *h;
h 265 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
h 267 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c h->flags = (BCDC_PROTO_VER << BCDC_FLAG_VER_SHIFT);
h 269 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c h->flags |= BCDC_FLAG_SUM_NEEDED;
h 271 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c h->priority = (pktbuf->priority & BCDC_PRIORITY_MASK);
h 272 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c h->flags2 = 0;
h 273 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c h->data_offset = offset;
h 274 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c BCDC_SET_IF_IDX(h, ifidx);
h 282 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c struct brcmf_proto_bcdc_header *h;
h 295 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
h 297 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c tmp_if = brcmf_get_ifp(drvr, BCDC_GET_IF_IDX(h));
h 302 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) !=
h 305 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c brcmf_ifname(tmp_if), h->flags);
h 309 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c if (h->flags & BCDC_FLAG_SUM_GOOD) {
h 311 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c brcmf_ifname(tmp_if), h->flags);
h 315 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c pktbuf->priority = h->priority & BCDC_PRIORITY_MASK;
h 319 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c brcmf_fws_hdrpull(tmp_if, h->data_offset << 2, pktbuf);
h 321 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c skb_pull(pktbuf, h->data_offset << 2);
h 581 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
h 585 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS;
h 587 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c while (i != h->slot_pos) {
h 588 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (h->items[i].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
h 589 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->slot_pos = i;
h 597 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->failed_slotfind++;
h 603 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
h 609 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
h 611 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->failed_to_push++;
h 615 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE;
h 616 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->items[slot_id].pkt = pkt;
h 617 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->pushed++;
h 621 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
h 628 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
h 630 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->failed_to_pop++;
h 634 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c *pktout = h->items[slot_id].pkt;
h 636 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
h 637 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->items[slot_id].pkt = NULL;
h 638 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->popped++;
h 665 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
h 671 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
h 676 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
h 684 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c struct brcmf_fws_hanger *h = &fws->hanger;
h 689 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c for (i = 0; i < ARRAY_SIZE(h->items); i++) {
h 690 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c s = h->items[i].state;
h 693 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c skb = h->items[i].pkt;
h 698 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c h->items[i].state =
h 847 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c struct ieee80211_hdr *h;
h 943 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
h 944 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
h 338 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c static uint ntxdactive(struct dma_info *di, uint h, uint t)
h 340 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c return txd(di, t-h);
h 343 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c static uint nrxdactive(struct dma_info *di, uint h, uint t)
h 345 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c return rxd(di, t-h);
h 845 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c struct ieee80211_hdr *h;
h 894 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
h 926 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c lastframe = !ieee80211_has_morefrags(h->frame_control);
h 6213 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c struct ieee80211_hdr *h;
h 6246 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c h = (struct ieee80211_hdr *)(p->data);
h 6247 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c qos = ieee80211_is_data_qos(h->frame_control);
h 6277 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c seq = le16_to_cpu(h->seq_ctrl) & FRAGNUM_MASK;
h 6279 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c h->seq_ctrl = cpu_to_le16(seq);
h 6288 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c if (ieee80211_is_beacon(h->frame_control))
h 6343 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c if (!is_multicast_ether_addr(h->addr1)) {
h 6482 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c if ((ieee80211_is_data(h->frame_control) ||
h 6483 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c ieee80211_is_mgmt(h->frame_control)) &&
h 6484 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c (phylen > wlc->RTSThresh) && !is_multicast_ether_addr(h->addr1))
h 6506 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c if (!ieee80211_is_pspoll(h->frame_control) &&
h 6507 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c !is_multicast_ether_addr(h->addr1) && !use_rifs) {
h 6511 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c h->duration_id = cpu_to_le16(durid);
h 6519 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c h->duration_id = cpu_to_le16(durid);
h 6523 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c if (ieee80211_is_pspoll(h->frame_control))
h 6524 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c txh->FragDurFallback = h->duration_id;
h 6525 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c else if (is_multicast_ether_addr(h->addr1) || use_rifs)
h 6537 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c if (!is_multicast_ether_addr(h->addr1))
h 6563 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c memcpy(&txh->MacFrameControl, &h->frame_control, sizeof(u16));
h 6569 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c memcpy(&txh->TxFrameRA, &h->addr1, ETH_ALEN);
h 6657 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c memcpy(&rts->ra, &h->addr2, ETH_ALEN);
h 6662 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c memcpy(&rts->ra, &h->addr1, 2 * ETH_ALEN);
h 7656 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c struct ieee80211_hdr *h;
h 7677 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c h = (struct ieee80211_hdr *)(p->data + D11_PHY_HDR_LEN);
h 7686 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c if (len < D11_PHY_HDR_LEN + sizeof(h->frame_control))
h 8350 drivers/net/wireless/intel/ipw2x00/ipw2100.c struct ipw2100_fw_header *h =
h 8353 drivers/net/wireless/intel/ipw2x00/ipw2100.c if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) {
h 8357 drivers/net/wireless/intel/ipw2x00/ipw2100.c h->version);
h 8361 drivers/net/wireless/intel/ipw2x00/ipw2100.c fw->version = h->version;
h 8363 drivers/net/wireless/intel/ipw2x00/ipw2100.c fw->fw.size = h->fw_size;
h 8364 drivers/net/wireless/intel/ipw2x00/ipw2100.c fw->uc.data = fw->fw.data + h->fw_size;
h 8365 drivers/net/wireless/intel/ipw2x00/ipw2100.c fw->uc.size = h->uc_size;
h 58 drivers/net/wireless/intersil/prism54/islpci_mgt.c pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)
h 60 drivers/net/wireless/intersil/prism54/islpci_mgt.c h->version = PIMFOR_VERSION;
h 61 drivers/net/wireless/intersil/prism54/islpci_mgt.c h->operation = operation;
h 62 drivers/net/wireless/intersil/prism54/islpci_mgt.c h->device_id = PIMFOR_DEV_ID_MHLI_MIB;
h 63 drivers/net/wireless/intersil/prism54/islpci_mgt.c h->flags = 0;
h 64 drivers/net/wireless/intersil/prism54/islpci_mgt.c h->oid = cpu_to_be32(oid);
h 65 drivers/net/wireless/intersil/prism54/islpci_mgt.c h->length = cpu_to_be32(length);
h 74 drivers/net/wireless/intersil/prism54/islpci_mgt.c pimfor_header_t *h = data;
h 76 drivers/net/wireless/intersil/prism54/islpci_mgt.c while ((void *) h < data + len) {
h 77 drivers/net/wireless/intersil/prism54/islpci_mgt.c if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
h 78 drivers/net/wireless/intersil/prism54/islpci_mgt.c le32_to_cpus(&h->oid);
h 79 drivers/net/wireless/intersil/prism54/islpci_mgt.c le32_to_cpus(&h->length);
h 81 drivers/net/wireless/intersil/prism54/islpci_mgt.c be32_to_cpus(&h->oid);
h 82 drivers/net/wireless/intersil/prism54/islpci_mgt.c be32_to_cpus(&h->length);
h 84 drivers/net/wireless/intersil/prism54/islpci_mgt.c if (h->oid != OID_INL_TUNNEL)
h 85 drivers/net/wireless/intersil/prism54/islpci_mgt.c return h;
h 86 drivers/net/wireless/intersil/prism54/islpci_mgt.c h++;
h 194 drivers/net/wireless/intersil/prism54/islpci_mgt.c pimfor_header_t *h = buf.mem;
h 197 drivers/net/wireless/intersil/prism54/islpci_mgt.c h->operation, oid, h->device_id, h->flags, length);
h 200 drivers/net/wireless/intersil/prism54/islpci_mgt.c display_buffer((char *) h, sizeof (pimfor_header_t));
h 152 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c const struct mt76x02_fw_header *h)
h 154 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c u16 bld = le16_to_cpu(h->build_ver);
h 155 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c u16 ver = le16_to_cpu(h->fw_ver);
h 102 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h const struct mt76x02_fw_header *h);
h 279 drivers/net/wireless/mediatek/mt7601u/trace.h struct mt76_sta *sta, struct mt76_txwi *h),
h 280 drivers/net/wireless/mediatek/mt7601u/trace.h TP_ARGS(dev, skb, sta, h),
h 283 drivers/net/wireless/mediatek/mt7601u/trace.h __field_struct(struct mt76_txwi, h)
h 289 drivers/net/wireless/mediatek/mt7601u/trace.h __entry->h = *h;
h 296 drivers/net/wireless/mediatek/mt7601u/trace.h le16_to_cpu(__entry->h.flags),
h 297 drivers/net/wireless/mediatek/mt7601u/trace.h le16_to_cpu(__entry->h.rate_ctl),
h 298 drivers/net/wireless/mediatek/mt7601u/trace.h __entry->h.ack_ctl, __entry->h.wcid,
h 299 drivers/net/wireless/mediatek/mt7601u/trace.h le16_to_cpu(__entry->h.len_ctl))
h 46 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h #define QTN_HOST_ADDR(h, l) ((((u64)h) << 32) | ((u64)l))
h 50 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h #define QTN_HOST_ADDR(h, l) ((u32)l)
h 52 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h #define QTN_HOST_ADDR(h, l) ((((u64)h) << 32) | ((u64)l))
h 56 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h #define QTN_HOST_ADDR(h, l) ((u32)l)
h 252 drivers/net/wireless/ti/wlcore/event.c int h;
h 254 drivers/net/wireless/ti/wlcore/event.c for_each_set_bit(h, &sta_bitmap, wl->num_links) {
h 258 drivers/net/wireless/ti/wlcore/event.c if (!test_bit(h, wlvif->ap.sta_hlid_map))
h 267 drivers/net/wireless/ti/wlcore/event.c addr = wl->links[h].addr;
h 272 drivers/net/wireless/ti/wlcore/event.c wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
h 553 drivers/net/wireless/ti/wlcore/tx.c int i, h, start_hlid;
h 560 drivers/net/wireless/ti/wlcore/tx.c h = (start_hlid + i) % wl->num_links;
h 563 drivers/net/wireless/ti/wlcore/tx.c if (!test_bit(h, wlvif->links_map))
h 566 drivers/net/wireless/ti/wlcore/tx.c skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
h 571 drivers/net/wireless/ti/wlcore/tx.c wlvif->last_tx_hlid = h;
h 3297 drivers/nvme/host/core.c struct nvme_ns_head *h;
h 3301 drivers/nvme/host/core.c list_for_each_entry(h, &subsys->nsheads, entry) {
h 3302 drivers/nvme/host/core.c if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
h 3303 drivers/nvme/host/core.c return h;
h 3312 drivers/nvme/host/core.c struct nvme_ns_head *h;
h 3316 drivers/nvme/host/core.c list_for_each_entry(h, &subsys->nsheads, entry) {
h 3318 drivers/nvme/host/core.c !list_empty(&h->list) &&
h 3319 drivers/nvme/host/core.c nvme_ns_ids_equal(&new->ids, &h->ids))
h 17 drivers/nvme/host/multipath.c struct nvme_ns_head *h;
h 20 drivers/nvme/host/multipath.c list_for_each_entry(h, &subsys->nsheads, entry)
h 21 drivers/nvme/host/multipath.c if (h->disk)
h 22 drivers/nvme/host/multipath.c blk_mq_unfreeze_queue(h->disk->queue);
h 27 drivers/nvme/host/multipath.c struct nvme_ns_head *h;
h 30 drivers/nvme/host/multipath.c list_for_each_entry(h, &subsys->nsheads, entry)
h 31 drivers/nvme/host/multipath.c if (h->disk)
h 32 drivers/nvme/host/multipath.c blk_mq_freeze_queue_wait(h->disk->queue);
h 37 drivers/nvme/host/multipath.c struct nvme_ns_head *h;
h 40 drivers/nvme/host/multipath.c list_for_each_entry(h, &subsys->nsheads, entry)
h 41 drivers/nvme/host/multipath.c if (h->disk)
h 42 drivers/nvme/host/multipath.c blk_freeze_queue_start(h->disk->queue);
h 750 drivers/pci/controller/pci-aardvark.c static int advk_pcie_irq_map(struct irq_domain *h,
h 753 drivers/pci/controller/pci-aardvark.c struct advk_pcie *pcie = h->host_data;
h 158 drivers/perf/arm-ccn.c u64 l, h;
h 467 drivers/perf/arm-ccn.c return &ccn->dt.cmp_mask[i].h;
h 959 drivers/perf/arm-ccn.c u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;
h 1249 drivers/perf/arm-ccn.c ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
h 1251 drivers/perf/arm-ccn.c ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
h 1253 drivers/perf/arm-ccn.c ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
h 1255 drivers/perf/arm-ccn.c ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
h 24 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c #define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
h 140 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c static void mtk_hw_bits_part(struct mtk_pin_field *pf, int *h, int *l)
h 143 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c *h = get_count_order(pf->mask) - *l;
h 163 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c int nbits_l, nbits_h, h, l;
h 169 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c h = (mtk_r32(hw, pf->index, pf->offset + pf->next))
h 172 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c *value = (h << nbits_l) | l;
h 897 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c #define NPCM7XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k) \
h 900 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c .fn2 = fn_ ## h, .reg2 = NPCM7XX_GCR_ ## i, .bit2 = j, \
h 224 drivers/pinctrl/samsung/pinctrl-exynos.c static int exynos_eint_irq_map(struct irq_domain *h, unsigned int virq,
h 227 drivers/pinctrl/samsung/pinctrl-exynos.c struct samsung_pin_bank *b = h->host_data;
h 414 drivers/pinctrl/samsung/pinctrl-s3c24xx.c static int s3c24xx_gpf_irq_map(struct irq_domain *h, unsigned int virq,
h 417 drivers/pinctrl/samsung/pinctrl-s3c24xx.c struct s3c24xx_eint_domain_data *ddata = h->host_data;
h 443 drivers/pinctrl/samsung/pinctrl-s3c24xx.c static int s3c24xx_gpg_irq_map(struct irq_domain *h, unsigned int virq,
h 446 drivers/pinctrl/samsung/pinctrl-s3c24xx.c struct s3c24xx_eint_domain_data *ddata = h->host_data;
h 382 drivers/pinctrl/samsung/pinctrl-s3c64xx.c static int s3c64xx_gpio_irq_map(struct irq_domain *h, unsigned int virq,
h 385 drivers/pinctrl/samsung/pinctrl-s3c64xx.c struct samsung_pin_bank *bank = h->host_data;
h 654 drivers/pinctrl/samsung/pinctrl-s3c64xx.c static int s3c64xx_eint0_irq_map(struct irq_domain *h, unsigned int virq,
h 657 drivers/pinctrl/samsung/pinctrl-s3c64xx.c struct s3c64xx_eint0_domain_data *ddata = h->host_data;
h 142 drivers/power/supply/bd70528-charger.c irqreturn_t (*h)(int irq, void *arg);
h 151 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-ov-res", .h = BD_IRQ_HND(BAT_OV_RES) },
h 152 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-ov-det", .h = BD_IRQ_HND(BAT_OV_DET) },
h 153 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-dead", .h = BD_IRQ_HND(DBAT_DET) },
h 154 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-warmed", .h = BD_IRQ_HND(COLD_RES) },
h 155 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-cold", .h = BD_IRQ_HND(COLD_DET) },
h 156 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-cooled", .h = BD_IRQ_HND(HOT_RES) },
h 157 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-hot", .h = BD_IRQ_HND(HOT_DET) },
h 158 drivers/power/supply/bd70528-charger.c { .n = "bd70528-chg-tshd", .h = BD_IRQ_HND(CHG_TSD) },
h 159 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-removed", .h = BD_IRQ_HND(BAT_RMV) },
h 160 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-detected", .h = BD_IRQ_HND(BAT_DET) },
h 161 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin2-ov-res", .h = BD_IRQ_HND(DCIN2_OV_RES) },
h 162 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin2-ov-det", .h = BD_IRQ_HND(DCIN2_OV_DET) },
h 163 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin2-removed", .h = BD_IRQ_HND(DCIN2_RMV) },
h 164 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin2-detected", .h = BD_IRQ_HND(DCIN2_DET) },
h 165 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin1-removed", .h = BD_IRQ_HND(DCIN1_RMV) },
h 166 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin1-detected", .h = BD_IRQ_HND(DCIN1_DET) },
h 177 drivers/power/supply/bd70528-charger.c bd70528_chg_irqs[i].h,
h 779 drivers/powercap/intel_rapl_common.c u32 l, h = 0;
h 783 drivers/powercap/intel_rapl_common.c rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
h 789 drivers/powercap/intel_rapl_common.c wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
h 816 drivers/powercap/intel_rapl_common.c u32 l, h;
h 825 drivers/powercap/intel_rapl_common.c rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
h 832 drivers/powercap/intel_rapl_common.c wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
h 57 drivers/ps3/ps3-sys-manager.c const struct ps3_sys_manager_header *h, const char *func, int line)
h 59 drivers/ps3/ps3-sys-manager.c pr_debug("%s:%d: version: %xh\n", func, line, h->version);
h 60 drivers/ps3/ps3-sys-manager.c pr_debug("%s:%d: size: %xh\n", func, line, h->size);
h 61 drivers/ps3/ps3-sys-manager.c pr_debug("%s:%d: payload_size: %xh\n", func, line, h->payload_size);
h 62 drivers/ps3/ps3-sys-manager.c pr_debug("%s:%d: service_id: %xh\n", func, line, h->service_id);
h 63 drivers/ps3/ps3-sys-manager.c pr_debug("%s:%d: request_tag: %xh\n", func, line, h->request_tag);
h 401 drivers/s390/char/raw3270.c short h; /* Heigth of usavle area */
h 484 drivers/s390/char/raw3270.c rp->rows = uap->uab.h;
h 461 drivers/scsi/3w-9xxx.h #define TW_PRINTK(h,a,b,c) { \
h 462 drivers/scsi/3w-9xxx.h if (h) \
h 463 drivers/scsi/3w-9xxx.h printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
h 191 drivers/scsi/3w-sas.h #define TW_PRINTK(h,a,b,c) { \
h 192 drivers/scsi/3w-sas.h if (h) \
h 193 drivers/scsi/3w-sas.h printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \ h 157 drivers/scsi/53c700.c STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *); h 1286 drivers/scsi/BusLogic.h static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *); h 2305 drivers/scsi/advansys.c static void asc_prt_asc_dvc_var(ASC_DVC_VAR *h) h 2307 drivers/scsi/advansys.c printk("ASC_DVC_VAR at addr 0x%lx\n", (ulong)h); h 2310 drivers/scsi/advansys.c "%d,\n", h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl); h 2312 drivers/scsi/advansys.c printk(" bus_type %d, init_sdtr 0x%x,\n", h->bus_type, h 2313 drivers/scsi/advansys.c (unsigned)h->init_sdtr); h 2316 drivers/scsi/advansys.c "chip_no 0x%x,\n", (unsigned)h->sdtr_done, h 2317 drivers/scsi/advansys.c (unsigned)h->use_tagged_qng, (unsigned)h->unit_not_ready, h 2318 drivers/scsi/advansys.c (unsigned)h->chip_no); h 2321 drivers/scsi/advansys.c "%u,\n", (unsigned)h->queue_full_or_busy, h 2322 drivers/scsi/advansys.c (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); h 2325 drivers/scsi/advansys.c "in_critical_cnt %u,\n", (unsigned)h->is_in_int, h 2326 drivers/scsi/advansys.c (unsigned)h->max_total_qng, (unsigned)h->cur_total_qng, h 2327 drivers/scsi/advansys.c (unsigned)h->in_critical_cnt); h 2330 drivers/scsi/advansys.c "pci_fix_asyn_xfer 0x%x,\n", (unsigned)h->last_q_shortage, h 2331 drivers/scsi/advansys.c (unsigned)h->init_state, (unsigned)h->no_scam, h 2332 drivers/scsi/advansys.c (unsigned)h->pci_fix_asyn_xfer); h 2334 drivers/scsi/advansys.c printk(" cfg 0x%lx\n", (ulong)h->cfg); h 2340 drivers/scsi/advansys.c static void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h) h 2342 drivers/scsi/advansys.c printk("ASC_DVC_CFG at addr 0x%lx\n", (ulong)h); h 2345 drivers/scsi/advansys.c h->can_tagged_qng, h->cmd_qng_enabled); h 2347 drivers/scsi/advansys.c h->disc_enable, h->sdtr_enable); h 2350 drivers/scsi/advansys.c "chip_version %d,\n", h->chip_scsi_id, h->isa_dma_speed, h 2351 drivers/scsi/advansys.c h->isa_dma_channel, h->chip_version); h 2354 drivers/scsi/advansys.c h->mcode_date, h->mcode_version); h 2362 drivers/scsi/advansys.c static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h) h 2364 drivers/scsi/advansys.c printk(" ADV_DVC_VAR at addr 0x%lx\n", (ulong)h); h 2367 drivers/scsi/advansys.c (ulong)h->iop_base, h->err_code, (unsigned)h->ultra_able); h 2370 drivers/scsi/advansys.c (unsigned)h->sdtr_able, (unsigned)h->wdtr_able); h 2373 drivers/scsi/advansys.c (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); h 2376 drivers/scsi/advansys.c (unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng, h 2377 drivers/scsi/advansys.c h->carr_freelist); h 2379 drivers/scsi/advansys.c printk(" icq_sp 0x%p, irq_sp 0x%p\n", h->icq_sp, h->irq_sp); h 2382 drivers/scsi/advansys.c (unsigned)h->no_scam, (unsigned)h->tagqng_able); h 2385 drivers/scsi/advansys.c (unsigned)h->chip_scsi_id, (ulong)h->cfg); h 2393 drivers/scsi/advansys.c static void asc_prt_adv_dvc_cfg(ADV_DVC_CFG *h) h 2395 drivers/scsi/advansys.c printk(" ADV_DVC_CFG at addr 0x%lx\n", (ulong)h); h 2398 drivers/scsi/advansys.c h->disc_enable, h->termination); h 2401 drivers/scsi/advansys.c h->chip_version, h->mcode_date); h 2404 drivers/scsi/advansys.c h->mcode_version, h->control_flag); h 113 drivers/scsi/arcmsr/arcmsr_hba.c static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); h 333 drivers/scsi/arm/fas216.h extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); h 1498 drivers/scsi/be2iscsi/be_cmds.c 
h 1509 drivers/scsi/be2iscsi/be_cmds.c ret, ioctl->h.resp_hdr.status);
h 1535 drivers/scsi/be2iscsi/be_cmds.c be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
h 1578 drivers/scsi/be2iscsi/be_cmds.c be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
h 753 drivers/scsi/be2iscsi/be_cmds.h } h;
h 778 drivers/scsi/be2iscsi/be_cmds.h } h;
h 1258 drivers/scsi/be2iscsi/be_cmds.h } h;
h 27 drivers/scsi/bfa/bfad_im.c static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
h 319 drivers/scsi/device_handler/scsi_dh_alua.c static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
h 357 drivers/scsi/device_handler/scsi_dh_alua.c spin_lock(&h->pg_lock);
h 358 drivers/scsi/device_handler/scsi_dh_alua.c old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
h 361 drivers/scsi/device_handler/scsi_dh_alua.c if (h->pg) {
h 363 drivers/scsi/device_handler/scsi_dh_alua.c list_del_rcu(&h->node);
h 366 drivers/scsi/device_handler/scsi_dh_alua.c rcu_assign_pointer(h->pg, pg);
h 372 drivers/scsi/device_handler/scsi_dh_alua.c list_add_rcu(&h->node, &pg->dh_list);
h 375 drivers/scsi/device_handler/scsi_dh_alua.c alua_rtpg_queue(rcu_dereference_protected(h->pg,
h 376 drivers/scsi/device_handler/scsi_dh_alua.c lockdep_is_held(&h->pg_lock)),
h 378 drivers/scsi/device_handler/scsi_dh_alua.c spin_unlock(&h->pg_lock);
h 654 drivers/scsi/device_handler/scsi_dh_alua.c struct alua_dh_data *h;
h 659 drivers/scsi/device_handler/scsi_dh_alua.c list_for_each_entry_rcu(h,
h 662 drivers/scsi/device_handler/scsi_dh_alua.c BUG_ON(!h->sdev);
h 663 drivers/scsi/device_handler/scsi_dh_alua.c h->sdev->access_state = desc[0];
h 700 drivers/scsi/device_handler/scsi_dh_alua.c struct alua_dh_data *h;
h 707 drivers/scsi/device_handler/scsi_dh_alua.c list_for_each_entry_rcu(h, &pg->dh_list, node) {
h 708 drivers/scsi/device_handler/scsi_dh_alua.c BUG_ON(!h->sdev);
h 709 drivers/scsi/device_handler/scsi_dh_alua.c h->sdev->access_state =
h 712 drivers/scsi/device_handler/scsi_dh_alua.c h->sdev->access_state |=
h 942 drivers/scsi/device_handler/scsi_dh_alua.c static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
h 946 drivers/scsi/device_handler/scsi_dh_alua.c mutex_lock(&h->init_mutex);
h 949 drivers/scsi/device_handler/scsi_dh_alua.c err = alua_check_vpd(sdev, h, tpgs);
h 950 drivers/scsi/device_handler/scsi_dh_alua.c h->init_error = err;
h 951 drivers/scsi/device_handler/scsi_dh_alua.c mutex_unlock(&h->init_mutex);
h 965 drivers/scsi/device_handler/scsi_dh_alua.c struct alua_dh_data *h = sdev->handler_data;
h 981 drivers/scsi/device_handler/scsi_dh_alua.c pg = rcu_dereference(h->pg);
h 1010 drivers/scsi/device_handler/scsi_dh_alua.c struct alua_dh_data *h = sdev->handler_data;
h 1023 drivers/scsi/device_handler/scsi_dh_alua.c mutex_lock(&h->init_mutex);
h 1025 drivers/scsi/device_handler/scsi_dh_alua.c pg = rcu_dereference(h->pg);
h 1029 drivers/scsi/device_handler/scsi_dh_alua.c err = h->init_error;
h 1030 drivers/scsi/device_handler/scsi_dh_alua.c mutex_unlock(&h->init_mutex);
h 1034 drivers/scsi/device_handler/scsi_dh_alua.c mutex_unlock(&h->init_mutex);
h 1055 drivers/scsi/device_handler/scsi_dh_alua.c struct alua_dh_data *h = sdev->handler_data;
h 1059 drivers/scsi/device_handler/scsi_dh_alua.c pg = rcu_dereference(h->pg);
h 1078 drivers/scsi/device_handler/scsi_dh_alua.c struct alua_dh_data *h = sdev->handler_data;
h 1083 drivers/scsi/device_handler/scsi_dh_alua.c pg = rcu_dereference(h->pg);
h 1103 drivers/scsi/device_handler/scsi_dh_alua.c struct alua_dh_data *h = sdev->handler_data;
h 1105 drivers/scsi/device_handler/scsi_dh_alua.c alua_initialize(sdev, h);
h 1114 drivers/scsi/device_handler/scsi_dh_alua.c struct alua_dh_data *h;
h 1117 drivers/scsi/device_handler/scsi_dh_alua.c h = kzalloc(sizeof(*h) , GFP_KERNEL);
h 1118 drivers/scsi/device_handler/scsi_dh_alua.c if (!h)
h 1120 drivers/scsi/device_handler/scsi_dh_alua.c spin_lock_init(&h->pg_lock);
h 1121 drivers/scsi/device_handler/scsi_dh_alua.c rcu_assign_pointer(h->pg, NULL);
h 1122 drivers/scsi/device_handler/scsi_dh_alua.c h->init_error = SCSI_DH_OK;
h 1123 drivers/scsi/device_handler/scsi_dh_alua.c h->sdev = sdev;
h 1124 drivers/scsi/device_handler/scsi_dh_alua.c INIT_LIST_HEAD(&h->node);
h 1126 drivers/scsi/device_handler/scsi_dh_alua.c mutex_init(&h->init_mutex);
h 1127 drivers/scsi/device_handler/scsi_dh_alua.c err = alua_initialize(sdev, h);
h 1131 drivers/scsi/device_handler/scsi_dh_alua.c sdev->handler_data = h;
h 1134 drivers/scsi/device_handler/scsi_dh_alua.c kfree(h);
h 1144 drivers/scsi/device_handler/scsi_dh_alua.c struct alua_dh_data *h = sdev->handler_data;
h 1147 drivers/scsi/device_handler/scsi_dh_alua.c spin_lock(&h->pg_lock);
h 1148 drivers/scsi/device_handler/scsi_dh_alua.c pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
h 1149 drivers/scsi/device_handler/scsi_dh_alua.c rcu_assign_pointer(h->pg, NULL);
h 1150 drivers/scsi/device_handler/scsi_dh_alua.c h->sdev = NULL;
h 1151 drivers/scsi/device_handler/scsi_dh_alua.c spin_unlock(&h->pg_lock);
h 1154 drivers/scsi/device_handler/scsi_dh_alua.c list_del_rcu(&h->node);
h 1159 drivers/scsi/device_handler/scsi_dh_alua.c kfree(h);
h 334 drivers/scsi/device_handler/scsi_dh_emc.c struct clariion_dh_data *h = sdev->handler_data;
h 336 drivers/scsi/device_handler/scsi_dh_emc.c if (h->lun_state != CLARIION_LUN_OWNED) {
h 475 drivers/scsi/device_handler/scsi_dh_emc.c struct clariion_dh_data *h;
h 478 drivers/scsi/device_handler/scsi_dh_emc.c h = kzalloc(sizeof(*h) , GFP_KERNEL);
h 479 drivers/scsi/device_handler/scsi_dh_emc.c if (!h)
h 481 drivers/scsi/device_handler/scsi_dh_emc.c h->lun_state = CLARIION_LUN_UNINITIALIZED;
h 482 drivers/scsi/device_handler/scsi_dh_emc.c h->default_sp = CLARIION_UNBOUND_LU;
h 483 drivers/scsi/device_handler/scsi_dh_emc.c h->current_sp = CLARIION_UNBOUND_LU;
h 485 drivers/scsi/device_handler/scsi_dh_emc.c err = clariion_std_inquiry(sdev, h);
h 489 drivers/scsi/device_handler/scsi_dh_emc.c err = clariion_send_inquiry(sdev, h);
h 495 drivers/scsi/device_handler/scsi_dh_emc.c CLARIION_NAME, h->current_sp + 'A',
h 496 drivers/scsi/device_handler/scsi_dh_emc.c h->port, lun_state[h->lun_state],
h 497 drivers/scsi/device_handler/scsi_dh_emc.c h->default_sp + 'A');
h 499 drivers/scsi/device_handler/scsi_dh_emc.c sdev->handler_data = h;
h 503 drivers/scsi/device_handler/scsi_dh_emc.c kfree(h);
h 43 drivers/scsi/device_handler/scsi_dh_hp_sw.c static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h,
h 59 drivers/scsi/device_handler/scsi_dh_hp_sw.c h->path_state = HP_SW_PATH_PASSIVE;
h 81 drivers/scsi/device_handler/scsi_dh_hp_sw.c static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
h 94 drivers/scsi/device_handler/scsi_dh_hp_sw.c ret = tur_done(sdev, h, &sshdr);
h 102 drivers/scsi/device_handler/scsi_dh_hp_sw.c h->path_state = HP_SW_PATH_ACTIVE;
h 117 drivers/scsi/device_handler/scsi_dh_hp_sw.c static int hp_sw_start_stop(struct hp_sw_dh_data *h)
h 121 drivers/scsi/device_handler/scsi_dh_hp_sw.c struct scsi_device *sdev = h->sdev;
h 121 drivers/scsi/device_handler/scsi_dh_hp_sw.c struct scsi_device *sdev = h->sdev;
h 164 drivers/scsi/device_handler/scsi_dh_hp_sw.c struct hp_sw_dh_data *h = sdev->handler_data;
h 166 drivers/scsi/device_handler/scsi_dh_hp_sw.c if (h->path_state != HP_SW_PATH_ACTIVE) {
h 188 drivers/scsi/device_handler/scsi_dh_hp_sw.c struct hp_sw_dh_data *h = sdev->handler_data;
h 190 drivers/scsi/device_handler/scsi_dh_hp_sw.c ret = hp_sw_tur(sdev, h);
h 192 drivers/scsi/device_handler/scsi_dh_hp_sw.c if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE)
h 193 drivers/scsi/device_handler/scsi_dh_hp_sw.c ret = hp_sw_start_stop(h);
h 202 drivers/scsi/device_handler/scsi_dh_hp_sw.c struct hp_sw_dh_data *h;
h 205 drivers/scsi/device_handler/scsi_dh_hp_sw.c h = kzalloc(sizeof(*h), GFP_KERNEL);
h 206 drivers/scsi/device_handler/scsi_dh_hp_sw.c if (!h)
h 208 drivers/scsi/device_handler/scsi_dh_hp_sw.c h->path_state = HP_SW_PATH_UNINITIALIZED;
h 209 drivers/scsi/device_handler/scsi_dh_hp_sw.c h->retries = HP_SW_RETRIES;
h 210 drivers/scsi/device_handler/scsi_dh_hp_sw.c h->sdev = sdev;
h 212 drivers/scsi/device_handler/scsi_dh_hp_sw.c ret = hp_sw_tur(sdev, h);
h 215 drivers/scsi/device_handler/scsi_dh_hp_sw.c if (h->path_state == HP_SW_PATH_UNINITIALIZED) {
h 221 drivers/scsi/device_handler/scsi_dh_hp_sw.c HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
h 224 drivers/scsi/device_handler/scsi_dh_hp_sw.c sdev->handler_data = h;
h 227 drivers/scsi/device_handler/scsi_dh_hp_sw.c kfree(h);
h 229 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_dh_data *h;
h 301 drivers/scsi/device_handler/scsi_dh_rdac.c lun_table[qdata->h->lun] = 0x81;
h 362 drivers/scsi/device_handler/scsi_dh_rdac.c static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
h 366 drivers/scsi/device_handler/scsi_dh_rdac.c struct c8_inquiry *inqp = &h->inq.c8;
h 375 drivers/scsi/device_handler/scsi_dh_rdac.c h->lun = inqp->lun[7]; /* Uses only the last byte */
h 388 drivers/scsi/device_handler/scsi_dh_rdac.c static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
h 392 drivers/scsi/device_handler/scsi_dh_rdac.c struct c9_inquiry *inqp = &h->inq.c9;
h 394 drivers/scsi/device_handler/scsi_dh_rdac.c h->state = RDAC_STATE_ACTIVE;
h 399 drivers/scsi/device_handler/scsi_dh_rdac.c h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
h 401 drivers/scsi/device_handler/scsi_dh_rdac.c h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
h 403 drivers/scsi/device_handler/scsi_dh_rdac.c h->mode = RDAC_MODE; /* LUN in RDAC mode */
h 407 drivers/scsi/device_handler/scsi_dh_rdac.c h->lun_state = RDAC_LUN_OWNED;
h 410 drivers/scsi/device_handler/scsi_dh_rdac.c h->lun_state = RDAC_LUN_UNOWNED;
h 411 drivers/scsi/device_handler/scsi_dh_rdac.c if (h->mode == RDAC_MODE) {
h 412 drivers/scsi/device_handler/scsi_dh_rdac.c h->state = RDAC_STATE_PASSIVE;
h 420 drivers/scsi/device_handler/scsi_dh_rdac.c h->preferred = RDAC_PREFERRED;
h 423 drivers/scsi/device_handler/scsi_dh_rdac.c h->preferred = RDAC_NON_PREFERRED;
h 425 drivers/scsi/device_handler/scsi_dh_rdac.c list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) {
h 438 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_dh_data *h, char *array_name, u8 *array_id)
h 441 drivers/scsi/device_handler/scsi_dh_rdac.c struct c4_inquiry *inqp = &h->inq.c4;
h 452 drivers/scsi/device_handler/scsi_dh_rdac.c h->ctlr = get_controller(index, array_name, array_id, sdev);
h 453 drivers/scsi/device_handler/scsi_dh_rdac.c if (!h->ctlr)
h 456 drivers/scsi/device_handler/scsi_dh_rdac.c list_add_rcu(&h->node, &h->ctlr->dh_list);
h 457 drivers/scsi/device_handler/scsi_dh_rdac.c h->sdev = sdev;
h 465 drivers/scsi/device_handler/scsi_dh_rdac.c static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
h 468 drivers/scsi/device_handler/scsi_dh_rdac.c struct c2_inquiry *inqp = &h->inq.c2;
h 477 drivers/scsi/device_handler/scsi_dh_rdac.c h->ctlr->use_ms10 = 1;
h 479 drivers/scsi/device_handler/scsi_dh_rdac.c h->ctlr->use_ms10 = 0;
h 489 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_dh_data *h = sdev->handler_data;
h 520 drivers/scsi/device_handler/scsi_dh_rdac.c (char *) h->ctlr->array_name, h->ctlr->index,
h 532 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_dh_data *h = sdev->handler_data;
h 555 drivers/scsi/device_handler/scsi_dh_rdac.c (char *) h->ctlr->array_name, h->ctlr->index,
h 558 drivers/scsi/device_handler/scsi_dh_rdac.c if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select,
h 568 drivers/scsi/device_handler/scsi_dh_rdac.c h->state = RDAC_STATE_ACTIVE;
h 571 drivers/scsi/device_handler/scsi_dh_rdac.c (char *) h->ctlr->array_name, h->ctlr->index);
h 577 drivers/scsi/device_handler/scsi_dh_rdac.c qdata->h->state = RDAC_STATE_ACTIVE;
h 595 drivers/scsi/device_handler/scsi_dh_rdac.c qdata->h = sdev->handler_data;
h 599 drivers/scsi/device_handler/scsi_dh_rdac.c ctlr = qdata->h->ctlr;
h 614 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_dh_data *h = sdev->handler_data;
h 618 drivers/scsi/device_handler/scsi_dh_rdac.c err = check_ownership(sdev, h);
h 622 drivers/scsi/device_handler/scsi_dh_rdac.c switch (h->mode) {
h 624 drivers/scsi/device_handler/scsi_dh_rdac.c if (h->lun_state == RDAC_LUN_UNOWNED)
h 628 drivers/scsi/device_handler/scsi_dh_rdac.c if ((h->lun_state == RDAC_LUN_UNOWNED) &&
h 629 drivers/scsi/device_handler/scsi_dh_rdac.c (h->preferred == RDAC_PREFERRED))
h 649 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_dh_data *h = sdev->handler_data;
h 651 drivers/scsi/device_handler/scsi_dh_rdac.c if (h->state != RDAC_STATE_ACTIVE) {
h 662 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_dh_data *h = sdev->handler_data;
h 666 drivers/scsi/device_handler/scsi_dh_rdac.c (char *) h->ctlr->array_name, h->ctlr->index,
h 703 drivers/scsi/device_handler/scsi_dh_rdac.c h->state = RDAC_STATE_PASSIVE;
h 726 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_dh_data *h;
h 731 drivers/scsi/device_handler/scsi_dh_rdac.c h = kzalloc(sizeof(*h) , GFP_KERNEL);
h 732 drivers/scsi/device_handler/scsi_dh_rdac.c if (!h)
h 734 drivers/scsi/device_handler/scsi_dh_rdac.c h->lun = UNINITIALIZED_LUN;
h 735 drivers/scsi/device_handler/scsi_dh_rdac.c h->state = RDAC_STATE_ACTIVE;
h 737 drivers/scsi/device_handler/scsi_dh_rdac.c err = get_lun_info(sdev, h, array_name, array_id);
h 741 drivers/scsi/device_handler/scsi_dh_rdac.c err = initialize_controller(sdev, h, array_name, array_id);
h 745 drivers/scsi/device_handler/scsi_dh_rdac.c err = check_ownership(sdev, h);
h 749 drivers/scsi/device_handler/scsi_dh_rdac.c err = set_mode_select(sdev, h);
h 755 drivers/scsi/device_handler/scsi_dh_rdac.c RDAC_NAME, h->lun, mode[(int)h->mode],
h 756 drivers/scsi/device_handler/scsi_dh_rdac.c lun_state[(int)h->lun_state]);
h 758 drivers/scsi/device_handler/scsi_dh_rdac.c sdev->handler_data = h;
h 763 drivers/scsi/device_handler/scsi_dh_rdac.c kref_put(&h->ctlr->kref, release_controller);
h 767 drivers/scsi/device_handler/scsi_dh_rdac.c kfree(h);
h 773 drivers/scsi/device_handler/scsi_dh_rdac.c struct rdac_dh_data *h = sdev->handler_data;
h 775 drivers/scsi/device_handler/scsi_dh_rdac.c if (h->ctlr && h->ctlr->ms_queued)
h 779 drivers/scsi/device_handler/scsi_dh_rdac.c if (h->ctlr) {
h 780 drivers/scsi/device_handler/scsi_dh_rdac.c list_del_rcu(&h->node);
h 781 drivers/scsi/device_handler/scsi_dh_rdac.c h->sdev = NULL;
h 782 drivers/scsi/device_handler/scsi_dh_rdac.c kref_put(&h->ctlr->kref, release_controller);
h 786 drivers/scsi/device_handler/scsi_dh_rdac.c kfree(h);
h 29 drivers/scsi/dpti.h static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd);
h 1314 drivers/scsi/esas2r/esas2r_main.c struct atto_vda_dh_info *h;
h 1402 drivers/scsi/esas2r/esas2r_main.c h = (struct atto_vda_dh_info *)data;
h 1404 drivers/scsi/esas2r/esas2r_main.c h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt);
h 1405 drivers/scsi/esas2r/esas2r_main.c h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt);
h 161 drivers/scsi/gdth.c static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
h 238 drivers/scsi/hpsa.c static int hpsa_add_sas_host(struct ctlr_info *h);
h 239 drivers/scsi/hpsa.c static void hpsa_delete_sas_host(struct ctlr_info *h);
h 244 drivers/scsi/hpsa.c *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
h 263 drivers/scsi/hpsa.c static void cmd_free(struct ctlr_info *h, struct CommandList *c);
h 264 drivers/scsi/hpsa.c static struct CommandList *cmd_alloc(struct ctlr_info *h);
h 265 drivers/scsi/hpsa.c static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
h 266 drivers/scsi/hpsa.c static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
h 268 drivers/scsi/hpsa.c static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
h 271 drivers/scsi/hpsa.c static void hpsa_free_cmd_pool(struct ctlr_info *h);
h 275 drivers/scsi/hpsa.c static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
h 286 drivers/scsi/hpsa.c static void hpsa_update_scsi_devices(struct ctlr_info *h);
h 287 drivers/scsi/hpsa.c static int check_for_unit_attention(struct ctlr_info *h,
h 289 drivers/scsi/hpsa.c static void check_ioctl_unit_attention(struct ctlr_info *h,
h 294 drivers/scsi/hpsa.c static void hpsa_free_performant_mode(struct ctlr_info *h);
h 295 drivers/scsi/hpsa.c static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
h 296 drivers/scsi/hpsa.c static inline u32 next_command(struct ctlr_info *h, u8 q);
h 304 drivers/scsi/hpsa.c static int wait_for_device_to_become_ready(struct ctlr_info *h,
h 310 drivers/scsi/hpsa.c static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
h 313 drivers/scsi/hpsa.c static void hpsa_drain_accel_commands(struct ctlr_info *h);
h 314 drivers/scsi/hpsa.c static void hpsa_flush_cache(struct ctlr_info *h);
h 315 drivers/scsi/hpsa.c static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
h 319 drivers/scsi/hpsa.c static u32 lockup_detected(struct ctlr_info *h);
h 320 drivers/scsi/hpsa.c static int detect_controller_lockup(struct ctlr_info *h);
h 321 drivers/scsi/hpsa.c static void hpsa_disable_rld_caching(struct ctlr_info *h);
h 322 drivers/scsi/hpsa.c static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
h 324 drivers/scsi/hpsa.c static bool hpsa_vpd_page_supported(struct ctlr_info *h,
h 326 drivers/scsi/hpsa.c static int hpsa_luns_changed(struct ctlr_info *h);
h 327 drivers/scsi/hpsa.c static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
h 370 drivers/scsi/hpsa.c static int check_for_unit_attention(struct ctlr_info *h,
h 388 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
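
The three *_bus_attach sequences indexed above (emc, hp_sw, rdac) share one shape: kzalloc() the per-device handler data, run the probe steps, and only on full success store the pointer in sdev->handler_data; every failure path frees the allocation. A condensed sketch, with my_dh_data and my_probe() as hypothetical stand-ins rather than any handler's real names:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <scsi/scsi_device.h>

    struct my_dh_data {                     /* hypothetical handler state */
        int path_state;
        struct scsi_device *sdev;
    };

    int my_probe(struct scsi_device *sdev, struct my_dh_data *h); /* placeholder */

    static int my_dh_attach(struct scsi_device *sdev)
    {
        struct my_dh_data *h;
        int err;

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
            return -ENOMEM;
        h->sdev = sdev;

        err = my_probe(sdev, h);            /* TUR / INQUIRY / mode sense etc. */
        if (err)
            goto failed;

        sdev->handler_data = h;             /* publish only after full success */
        return 0;

    failed:
        kfree(h);                           /* no partial state left behind */
        return err;
    }
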
h 390 drivers/scsi/hpsa.c h->devname);
h 393 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 394 drivers/scsi/hpsa.c "%s: LUN failure detected\n", h->devname);
h 397 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 398 drivers/scsi/hpsa.c "%s: report LUN data changed\n", h->devname);
h 405 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 407 drivers/scsi/hpsa.c h->devname);
h 410 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 412 drivers/scsi/hpsa.c h->devname);
h 415 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 417 drivers/scsi/hpsa.c h->devname);
h 423 drivers/scsi/hpsa.c static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
h 429 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, HPSA "device busy");
h 433 drivers/scsi/hpsa.c static u32 lockup_detected(struct ctlr_info *h);
h 438 drivers/scsi/hpsa.c struct ctlr_info *h;
h 441 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 442 drivers/scsi/hpsa.c ld = lockup_detected(h);
h 452 drivers/scsi/hpsa.c struct ctlr_info *h;
h 463 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 464 drivers/scsi/hpsa.c h->acciopath_status = !!status;
h 465 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 467 drivers/scsi/hpsa.c h->acciopath_status ? "enabled" : "disabled");
h 476 drivers/scsi/hpsa.c struct ctlr_info *h;
h 489 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 490 drivers/scsi/hpsa.c h->raid_offload_debug = debug_level;
h 491 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
h 492 drivers/scsi/hpsa.c h->raid_offload_debug);
h 500 drivers/scsi/hpsa.c struct ctlr_info *h;
h 502 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 503 drivers/scsi/hpsa.c hpsa_scan_start(h->scsi_host);
h 510 drivers/scsi/hpsa.c struct ctlr_info *h;
h 514 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 515 drivers/scsi/hpsa.c if (!h->hba_inquiry_data)
h 517 drivers/scsi/hpsa.c fwrev = &h->hba_inquiry_data[32];
h 526 drivers/scsi/hpsa.c struct ctlr_info *h = shost_to_hba(shost);
h 529 drivers/scsi/hpsa.c atomic_read(&h->commands_outstanding));
h 535 drivers/scsi/hpsa.c struct ctlr_info *h;
h 538 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 540 drivers/scsi/hpsa.c h->transMethod & CFGTBL_Trans_Performant ?
h 547 drivers/scsi/hpsa.c struct ctlr_info *h;
h 550 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 552 drivers/scsi/hpsa.c (h->acciopath_status == 1) ? "enabled" : "disabled");
h 631 drivers/scsi/hpsa.c struct ctlr_info *h;
h 634 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 635 drivers/scsi/hpsa.c return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
h 666 drivers/scsi/hpsa.c struct ctlr_info *h;
h 672 drivers/scsi/hpsa.c h = sdev_to_hba(sdev);
h 673 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags);
h 676 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 682 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 688 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 698 drivers/scsi/hpsa.c struct ctlr_info *h;
h 705 drivers/scsi/hpsa.c h = sdev_to_hba(sdev);
h 706 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags);
h 709 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 713 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 720 drivers/scsi/hpsa.c struct ctlr_info *h;
h 727 drivers/scsi/hpsa.c h = sdev_to_hba(sdev);
h 728 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags);
h 731 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 735 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 748 drivers/scsi/hpsa.c struct ctlr_info *h;
h 755 drivers/scsi/hpsa.c h = sdev_to_hba(sdev);
h 756 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags);
h 759 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 763 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 771 drivers/scsi/hpsa.c struct ctlr_info *h;
h 778 drivers/scsi/hpsa.c h = sdev_to_hba(sdev);
h 779 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags);
h 782 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 786 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 799 drivers/scsi/hpsa.c struct ctlr_info *h;
h 812 drivers/scsi/hpsa.c h = sdev_to_hba(sdev);
h 813 drivers/scsi/hpsa.c spin_lock_irqsave(&h->devlock, flags);
h 816 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->devlock, flags);
h 833 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 877 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->devlock, flags);
h 884 drivers/scsi/hpsa.c struct ctlr_info *h;
h 887 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 888 drivers/scsi/hpsa.c return snprintf(buf, 20, "%d\n", h->ctlr);
h 894 drivers/scsi/hpsa.c struct ctlr_info *h;
h 897 drivers/scsi/hpsa.c h = shost_to_hba(shost);
h 898 drivers/scsi/hpsa.c return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
h 979 drivers/scsi/hpsa.c static inline u32 next_command(struct ctlr_info *h, u8 q)
h 982 drivers/scsi/hpsa.c struct reply_queue_buffer *rq = &h->reply_queue[q];
h 984 drivers/scsi/hpsa.c if (h->transMethod & CFGTBL_Trans_io_accel1)
h 985 drivers/scsi/hpsa.c return h->access.command_completed(h, q);
h 987 drivers/scsi/hpsa.c if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
h 988 drivers/scsi/hpsa.c return h->access.command_completed(h, q);
h 993 drivers/scsi/hpsa.c atomic_dec(&h->commands_outstanding);
h 998 drivers/scsi/hpsa.c if (rq->current_entry == h->max_commands) {
h 1037 drivers/scsi/hpsa.c static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
h 1040 drivers/scsi/hpsa.c if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
h 1041 drivers/scsi/hpsa.c c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
h 1042 drivers/scsi/hpsa.c if (unlikely(!h->msix_vectors))
h 1048 drivers/scsi/hpsa.c static void set_ioaccel1_performant_mode(struct ctlr_info *h,
h 1052 drivers/scsi/hpsa.c struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
h 1065 drivers/scsi/hpsa.c c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
h 1069 drivers/scsi/hpsa.c static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
h 1074 drivers/scsi/hpsa.c &h->ioaccel2_cmd_pool[c->cmdindex];
h 1085 drivers/scsi/hpsa.c c->busaddr |= h->ioaccel2_blockFetchTable[0];
h 1088 drivers/scsi/hpsa.c static void set_ioaccel2_performant_mode(struct ctlr_info *h,
h 1092 drivers/scsi/hpsa.c struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
h 1105 drivers/scsi/hpsa.c c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
h 1121 drivers/scsi/hpsa.c static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
h 1126 drivers/scsi/hpsa.c atomic_inc(&h->firmware_flash_in_progress);
h 1127 drivers/scsi/hpsa.c h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
h 1130 drivers/scsi/hpsa.c static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
h 1134 drivers/scsi/hpsa.c atomic_dec_and_test(&h->firmware_flash_in_progress))
h 1135 drivers/scsi/hpsa.c h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
h 1138 drivers/scsi/hpsa.c static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
h 1141 drivers/scsi/hpsa.c dial_down_lockup_detection_during_fw_flash(h, c);
h 1142 drivers/scsi/hpsa.c atomic_inc(&h->commands_outstanding);
h 1146 drivers/scsi/hpsa.c reply_queue = h->reply_map[raw_smp_processor_id()];
h 1149 drivers/scsi/hpsa.c set_ioaccel1_performant_mode(h, c, reply_queue);
h 1150 drivers/scsi/hpsa.c writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
h 1153 drivers/scsi/hpsa.c set_ioaccel2_performant_mode(h, c, reply_queue);
h 1154 drivers/scsi/hpsa.c writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
h 1157 drivers/scsi/hpsa.c set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
h 1158 drivers/scsi/hpsa.c writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
h 1161 drivers/scsi/hpsa.c set_performant_mode(h, c, reply_queue);
h 1162 drivers/scsi/hpsa.c h->access.submit_command(h, c);
h 1166 drivers/scsi/hpsa.c static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
h 1168 drivers/scsi/hpsa.c __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
h 1176 drivers/scsi/hpsa.c static inline int is_scsi_rev_5(struct ctlr_info *h)
h 1178 drivers/scsi/hpsa.c if (!h->hba_inquiry_data)
h 1180 drivers/scsi/hpsa.c if ((h->hba_inquiry_data[2] & 0x07) == 5)
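
set_performant_mode() and its ioaccel variants, indexed just above, encode routing data into the low bits of c->busaddr before writel() posts it to the controller: bit 0 flags performant mode and the block-fetch table value sits just above it, which presumably works because the command-pool entries are aligned so those bits of the real bus address are zero. An illustrative helper (the exact field widths belong to the controller spec, not this sketch):

    #include <linux/types.h>

    /* bit 0: performant-mode flag; bits above it: block fetch count.
     * Assumes busaddr's low bits are zero due to pool alignment. */
    static inline u32 pack_perf_busaddr(u32 busaddr, u32 block_fetch_count)
    {
        return busaddr | 1u | (block_fetch_count << 1);
    }
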
h 1185 drivers/scsi/hpsa.c static int hpsa_find_target_lun(struct ctlr_info *h,
h 1196 drivers/scsi/hpsa.c for (i = 0; i < h->ndevices; i++) {
h 1197 drivers/scsi/hpsa.c if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
h 1198 drivers/scsi/hpsa.c __set_bit(h->dev[i]->target, lun_taken);
h 1211 drivers/scsi/hpsa.c static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
h 1217 drivers/scsi/hpsa.c if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
h 1253 drivers/scsi/hpsa.c dev_printk(level, &h->pdev->dev,
h 1255 drivers/scsi/hpsa.c h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
h 1267 drivers/scsi/hpsa.c static int hpsa_scsi_add_entry(struct ctlr_info *h,
h 1272 drivers/scsi/hpsa.c int n = h->ndevices;
h 1278 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "too many devices, some will be "
h 1294 drivers/scsi/hpsa.c if (hpsa_find_target_lun(h, device->scsi3addr,
h 1310 drivers/scsi/hpsa.c sd = h->dev[i];
h 1323 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
h 1331 drivers/scsi/hpsa.c h->dev[n] = device;
h 1332 drivers/scsi/hpsa.c h->ndevices++;
h 1335 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_INFO, h, device,
h 1345 drivers/scsi/hpsa.c static void hpsa_scsi_update_entry(struct ctlr_info *h,
h 1352 drivers/scsi/hpsa.c h->dev[entry]->raid_level = new_entry->raid_level;
h 1357 drivers/scsi/hpsa.c h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
h 1369 drivers/scsi/hpsa.c h->dev[entry]->raid_map = new_entry->raid_map;
h 1370 drivers/scsi/hpsa.c h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
h 1373 drivers/scsi/hpsa.c h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
h 1376 drivers/scsi/hpsa.c h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
h 1377 drivers/scsi/hpsa.c h->dev[entry]->offload_config = new_entry->offload_config;
h 1378 drivers/scsi/hpsa.c h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
h 1379 drivers/scsi/hpsa.c h->dev[entry]->queue_depth = new_entry->queue_depth;
h 1386 drivers/scsi/hpsa.c h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
h 1392 drivers/scsi/hpsa.c h->dev[entry]->offload_enabled = 0;
h 1394 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
h 1398 drivers/scsi/hpsa.c static void hpsa_scsi_replace_entry(struct ctlr_info *h,
h 1405 drivers/scsi/hpsa.c removed[*nremoved] = h->dev[entry];
h 1413 drivers/scsi/hpsa.c new_entry->target = h->dev[entry]->target;
h 1414 drivers/scsi/hpsa.c new_entry->lun = h->dev[entry]->lun;
h 1417 drivers/scsi/hpsa.c h->dev[entry] = new_entry;
h 1421 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
h 1425 drivers/scsi/hpsa.c static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
h 1434 drivers/scsi/hpsa.c sd = h->dev[entry];
h 1435 drivers/scsi/hpsa.c removed[*nremoved] = h->dev[entry];
h 1438 drivers/scsi/hpsa.c for (i = entry; i < h->ndevices-1; i++)
h 1439 drivers/scsi/hpsa.c h->dev[i] = h->dev[i+1];
h 1440 drivers/scsi/hpsa.c h->ndevices--;
h 1441 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
h 1454 drivers/scsi/hpsa.c static void fixup_botched_add(struct ctlr_info *h,
h 1463 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags);
h 1464 drivers/scsi/hpsa.c for (i = 0; i < h->ndevices; i++) {
h 1465 drivers/scsi/hpsa.c if (h->dev[i] == added) {
h 1466 drivers/scsi/hpsa.c for (j = i; j < h->ndevices-1; j++)
h 1467 drivers/scsi/hpsa.c h->dev[j] = h->dev[j+1];
h 1468 drivers/scsi/hpsa.c h->ndevices--;
h 1472 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 1571 drivers/scsi/hpsa.c static void hpsa_monitor_offline_device(struct ctlr_info *h,
h 1578 drivers/scsi/hpsa.c spin_lock_irqsave(&h->offline_device_lock, flags);
h 1579 drivers/scsi/hpsa.c list_for_each_entry(device, &h->offline_device_list, offline_list) {
h 1582 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->offline_device_lock, flags);
h 1586 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->offline_device_lock, flags);
h 1594 drivers/scsi/hpsa.c spin_lock_irqsave(&h->offline_device_lock, flags);
h 1595 drivers/scsi/hpsa.c list_add_tail(&device->offline_list, &h->offline_device_list);
h 1596 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->offline_device_lock, flags);
h 1600 drivers/scsi/hpsa.c static void hpsa_show_volume_status(struct ctlr_info *h,
h 1604 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1606 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1612 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1614 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1618 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1620 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1624 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1626 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1630 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1632 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1636 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1638 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1642 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1644 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1648 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1650 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1654 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1656 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1660 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1662 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1666 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1668 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1672 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 1674 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1684 drivers/scsi/hpsa.c static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
h 1723 drivers/scsi/hpsa.c qdepth = min(h->nr_cmds, qdepth +
h 1736 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 1739 drivers/scsi/hpsa.c h->scsi_host->host_no, logical_drive->bus,
h 1756 drivers/scsi/hpsa.c logical_drive->queue_depth = h->nr_cmds;
h 1760 drivers/scsi/hpsa.c static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
h 1794 drivers/scsi/hpsa.c hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
h 1798 drivers/scsi/hpsa.c static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
h 1802 drivers/scsi/hpsa.c if (!h->scsi_host)
h 1806 drivers/scsi/hpsa.c rc = scsi_add_device(h->scsi_host, device->bus,
h 1809 drivers/scsi/hpsa.c rc = hpsa_add_sas_device(h->sas_host, device);
h 1814 drivers/scsi/hpsa.c static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
h 1820 drivers/scsi/hpsa.c for (i = 0; i < h->nr_cmds; i++) {
h 1821 drivers/scsi/hpsa.c struct CommandList *c = h->cmd_pool + i;
h 1824 drivers/scsi/hpsa.c if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
h 1828 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); /* Implied MB */
h 1831 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags);
h 1834 drivers/scsi/hpsa.c cmd_free(h, c);
h 1841 drivers/scsi/hpsa.c static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
h 1852 drivers/scsi/hpsa.c cmds = hpsa_find_outstanding_commands_for_dev(h, device);
h 1861 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 1864 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 1869 drivers/scsi/hpsa.c static void hpsa_remove_device(struct ctlr_info *h,
h 1874 drivers/scsi/hpsa.c if (!h->scsi_host)
h 1881 drivers/scsi/hpsa.c hpsa_wait_for_outstanding_commands_for_dev(h, device);
h 1884 drivers/scsi/hpsa.c sdev = scsi_device_lookup(h->scsi_host, device->bus,
h 1895 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_WARNING, h, device,
h 1904 drivers/scsi/hpsa.c static void adjust_hpsa_scsi_table(struct ctlr_info *h,
h 1921 drivers/scsi/hpsa.c spin_lock_irqsave(&h->reset_lock, flags);
h 1922 drivers/scsi/hpsa.c if (h->reset_in_progress) {
h 1923 drivers/scsi/hpsa.c h->drv_req_rescan = 1;
h 1924 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->reset_lock, flags);
h 1927 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->reset_lock, flags);
h 1933 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "out of memory in "
h 1938 drivers/scsi/hpsa.c spin_lock_irqsave(&h->devlock, flags);
h 1950 drivers/scsi/hpsa.c while (i < h->ndevices) {
h 1951 drivers/scsi/hpsa.c csd = h->dev[i];
h 1955 drivers/scsi/hpsa.c hpsa_scsi_remove_entry(h, i, removed, &nremoved);
h 1959 drivers/scsi/hpsa.c hpsa_scsi_replace_entry(h, i, sd[entry],
h 1966 drivers/scsi/hpsa.c hpsa_scsi_update_entry(h, i, sd[entry]);
h 1985 drivers/scsi/hpsa.c hpsa_show_volume_status(h, sd[i]);
h 1986 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
h 1990 drivers/scsi/hpsa.c device_change = hpsa_scsi_find_entry(sd[i], h->dev,
h 1991 drivers/scsi/hpsa.c h->ndevices, &entry);
h 1994 drivers/scsi/hpsa.c if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
h 2000 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 2005 drivers/scsi/hpsa.c hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
h 2015 drivers/scsi/hpsa.c for (i = 0; i < h->ndevices; i++) {
h 2016 drivers/scsi/hpsa.c if (h->dev[i] == NULL)
h 2018 drivers/scsi/hpsa.c h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
h 2021 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->devlock, flags);
h 2031 drivers/scsi/hpsa.c hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
h 2046 drivers/scsi/hpsa.c hpsa_remove_device(h, removed[i]);
h 2059 drivers/scsi/hpsa.c rc = hpsa_add_device(h, added[i]);
h 2062 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 2067 drivers/scsi/hpsa.c fixup_botched_add(h, added[i]);
h 2068 drivers/scsi/hpsa.c h->drv_req_rescan = 1;
h 2080 drivers/scsi/hpsa.c static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
h 2086 drivers/scsi/hpsa.c for (i = 0; i < h->ndevices; i++) {
h 2087 drivers/scsi/hpsa.c sd = h->dev[i];
h 2098 drivers/scsi/hpsa.c struct ctlr_info *h;
h 2100 drivers/scsi/hpsa.c h = sdev_to_hba(sdev);
h 2101 drivers/scsi/hpsa.c spin_lock_irqsave(&h->devlock, flags);
h 2108 drivers/scsi/hpsa.c sd = hpsa_find_device_by_sas_rphy(h, rphy);
h 2115 drivers/scsi/hpsa.c sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
h 2123 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->devlock, flags);
h 2165 drivers/scsi/hpsa.c static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
h 2169 drivers/scsi/hpsa.c if (!h->ioaccel2_cmd_sg_list)
h 2171 drivers/scsi/hpsa.c for (i = 0; i < h->nr_cmds; i++) {
h 2172 drivers/scsi/hpsa.c kfree(h->ioaccel2_cmd_sg_list[i]);
h 2173 drivers/scsi/hpsa.c h->ioaccel2_cmd_sg_list[i] = NULL;
h 2175 drivers/scsi/hpsa.c kfree(h->ioaccel2_cmd_sg_list);
h 2176 drivers/scsi/hpsa.c h->ioaccel2_cmd_sg_list = NULL;
h 2179 drivers/scsi/hpsa.c static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
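
hpsa_alloc_sg_chain_blocks() and the ioaccel2 twin indexed here build a two-level allocation: one kcalloc() for a per-command pointer table, then one kmalloc_array() chain buffer per command; on partial failure the allocator simply calls the matching free routine, which walks the table and tolerates the NULL tail because kfree(NULL) is a no-op. Reduced to the pattern, with hypothetical names:

    #include <linux/slab.h>
    #include <linux/types.h>

    static void free_chain_blocks(u64 **tbl, int nr_cmds)
    {
        int i;

        if (!tbl)
            return;
        for (i = 0; i < nr_cmds; i++)
            kfree(tbl[i]);                  /* kfree(NULL) is a no-op */
        kfree(tbl);
    }

    static u64 **alloc_chain_blocks(int nr_cmds, int chainsize)
    {
        u64 **tbl;
        int i;

        tbl = kcalloc(nr_cmds, sizeof(*tbl), GFP_KERNEL);
        if (!tbl)
            return NULL;
        for (i = 0; i < nr_cmds; i++) {
            tbl[i] = kmalloc_array(chainsize, sizeof(u64), GFP_KERNEL);
            if (!tbl[i]) {
                free_chain_blocks(tbl, nr_cmds); /* unwind everything */
                return NULL;
            }
        }
        return tbl;
    }
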
h 2183 drivers/scsi/hpsa.c if (h->chainsize <= 0)
h 2186 drivers/scsi/hpsa.c h->ioaccel2_cmd_sg_list =
h 2187 drivers/scsi/hpsa.c kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
h 2189 drivers/scsi/hpsa.c if (!h->ioaccel2_cmd_sg_list)
h 2191 drivers/scsi/hpsa.c for (i = 0; i < h->nr_cmds; i++) {
h 2192 drivers/scsi/hpsa.c h->ioaccel2_cmd_sg_list[i] =
h 2193 drivers/scsi/hpsa.c kmalloc_array(h->maxsgentries,
h 2194 drivers/scsi/hpsa.c sizeof(*h->ioaccel2_cmd_sg_list[i]),
h 2196 drivers/scsi/hpsa.c if (!h->ioaccel2_cmd_sg_list[i])
h 2202 drivers/scsi/hpsa.c hpsa_free_ioaccel2_sg_chain_blocks(h);
h 2206 drivers/scsi/hpsa.c static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
h 2210 drivers/scsi/hpsa.c if (!h->cmd_sg_list)
h 2212 drivers/scsi/hpsa.c for (i = 0; i < h->nr_cmds; i++) {
h 2213 drivers/scsi/hpsa.c kfree(h->cmd_sg_list[i]);
h 2214 drivers/scsi/hpsa.c h->cmd_sg_list[i] = NULL;
h 2216 drivers/scsi/hpsa.c kfree(h->cmd_sg_list);
h 2217 drivers/scsi/hpsa.c h->cmd_sg_list = NULL;
h 2220 drivers/scsi/hpsa.c static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
h 2224 drivers/scsi/hpsa.c if (h->chainsize <= 0)
h 2227 drivers/scsi/hpsa.c h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
h 2229 drivers/scsi/hpsa.c if (!h->cmd_sg_list)
h 2232 drivers/scsi/hpsa.c for (i = 0; i < h->nr_cmds; i++) {
h 2233 drivers/scsi/hpsa.c h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
h 2234 drivers/scsi/hpsa.c sizeof(*h->cmd_sg_list[i]),
h 2236 drivers/scsi/hpsa.c if (!h->cmd_sg_list[i])
h 2243 drivers/scsi/hpsa.c hpsa_free_sg_chain_blocks(h);
h 2247 drivers/scsi/hpsa.c static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
h 2254 drivers/scsi/hpsa.c chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
h 2256 drivers/scsi/hpsa.c temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
h 2258 drivers/scsi/hpsa.c if (dma_mapping_error(&h->pdev->dev, temp64)) {
h 2267 drivers/scsi/hpsa.c static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
h 2277 drivers/scsi/hpsa.c dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
h 2280 drivers/scsi/hpsa.c static int hpsa_map_sg_chain_block(struct ctlr_info *h,
h 2287 drivers/scsi/hpsa.c chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
h 2288 drivers/scsi/hpsa.c chain_block = h->cmd_sg_list[c->cmdindex];
h 2291 drivers/scsi/hpsa.c (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
h 2293 drivers/scsi/hpsa.c temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
h 2295 drivers/scsi/hpsa.c if (dma_mapping_error(&h->pdev->dev, temp64)) {
h 2304 drivers/scsi/hpsa.c static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
h 2309 drivers/scsi/hpsa.c if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
h 2312 drivers/scsi/hpsa.c chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
h 2313 drivers/scsi/hpsa.c dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
h 2322 drivers/scsi/hpsa.c static int handle_ioaccel_mode2_error(struct ctlr_info *h,
h 2403 drivers/scsi/hpsa.c h->drv_req_rescan = 1;
h 2404 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 2438 drivers/scsi/hpsa.c static void hpsa_cmd_resolve_events(struct ctlr_info *h,
h 2454 drivers/scsi/hpsa.c wake_up_all(&h->event_sync_wait_queue);
h 2458 drivers/scsi/hpsa.c static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
h 2461 drivers/scsi/hpsa.c hpsa_cmd_resolve_events(h, c);
h 2462 drivers/scsi/hpsa.c cmd_tagged_free(h, c);
h 2465 drivers/scsi/hpsa.c static void hpsa_cmd_free_and_done(struct ctlr_info *h,
h 2468 drivers/scsi/hpsa.c hpsa_cmd_resolve_and_free(h, c);
h 2473 drivers/scsi/hpsa.c static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
h 2476 drivers/scsi/hpsa.c queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
h 2479 drivers/scsi/hpsa.c static void process_ioaccel2_completion(struct ctlr_info *h,
h 2483 drivers/scsi/hpsa.c struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
h 2489 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, c, cmd);
h 2508 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, c, cmd);
h 2511 drivers/scsi/hpsa.c return hpsa_retry_cmd(h, c);
h 2514 drivers/scsi/hpsa.c if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
h 2515 drivers/scsi/hpsa.c return hpsa_retry_cmd(h, c);
h 2517 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, c, cmd);
h 2521 drivers/scsi/hpsa.c static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
h 2541 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
h 2551 drivers/scsi/hpsa.c struct ctlr_info *h;
h 2563 drivers/scsi/hpsa.c h = cp->h;
h 2567 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, cp, cmd);
h 2573 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, cp, cmd);
h 2575 drivers/scsi/hpsa.c c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
h 2579 drivers/scsi/hpsa.c (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
h 2580 drivers/scsi/hpsa.c hpsa_unmap_sg_chain_block(h, cp);
h 2584 drivers/scsi/hpsa.c hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
h 2591 drivers/scsi/hpsa.c hpsa_cmd_resolve_and_free(h, cp);
h 2599 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, cp, cmd);
h 2613 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, cp, cmd);
h 2617 drivers/scsi/hpsa.c return process_ioaccel2_completion(h, cp, cmd, dev);
h 2621 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, cp, cmd);
h 2627 drivers/scsi/hpsa.c struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
h 2643 drivers/scsi/hpsa.c return hpsa_retry_cmd(h, cp);
h 2670 drivers/scsi/hpsa.c h->drv_req_rescan = 1;
h 2685 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
h 2692 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
h 2714 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 2731 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
h 2736 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
h 2741 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
h 2749 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
h 2754 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
h 2759 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
h 2764 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Command unabortable\n");
h 2767 drivers/scsi/hpsa.c if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
h 2775 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 2780 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
h 2784 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, cp, cmd);
h 2829 drivers/scsi/hpsa.c static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
h 2835 drivers/scsi/hpsa.c __enqueue_cmd_and_start_io(h, c, reply_queue);
h 2843 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Command timed out.\n");
h 2849 drivers/scsi/hpsa.c static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
h 2852 drivers/scsi/hpsa.c if (unlikely(lockup_detected(h))) {
h 2856 drivers/scsi/hpsa.c return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
h 2859 drivers/scsi/hpsa.c static u32 lockup_detected(struct ctlr_info *h)
h 2865 drivers/scsi/hpsa.c lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
h 2872 drivers/scsi/hpsa.c static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
h 2881 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
h 2891 drivers/scsi/hpsa.c } while ((check_for_unit_attention(h, c) ||
h 2892 drivers/scsi/hpsa.c check_for_busy(h, c)) &&
h 2894 drivers/scsi/hpsa.c hpsa_pci_unmap(h->pdev, c, 1, data_direction);
h 2900 drivers/scsi/hpsa.c static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
h 2906 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
h 2910 drivers/scsi/hpsa.c static void hpsa_scsi_interpret_error(struct ctlr_info *h,
h 2914 drivers/scsi/hpsa.c struct device *d = &cp->h->pdev->dev;
h 2926 drivers/scsi/hpsa.c hpsa_print_cmd(h, "SCSI status", cp);
h 2941 drivers/scsi/hpsa.c hpsa_print_cmd(h, "overrun condition", cp);
h 2947 drivers/scsi/hpsa.c hpsa_print_cmd(h, "invalid command", cp);
h 2952 drivers/scsi/hpsa.c hpsa_print_cmd(h, "protocol error", cp);
h 2955 drivers/scsi/hpsa.c hpsa_print_cmd(h, "hardware error", cp);
h 2958 drivers/scsi/hpsa.c hpsa_print_cmd(h, "connection lost", cp);
h 2961 drivers/scsi/hpsa.c hpsa_print_cmd(h, "aborted", cp);
h 2964 drivers/scsi/hpsa.c hpsa_print_cmd(h, "abort failed", cp);
h 2967 drivers/scsi/hpsa.c hpsa_print_cmd(h, "unsolicited abort", cp);
h 2970 drivers/scsi/hpsa.c hpsa_print_cmd(h, "timed out", cp);
h 2973 drivers/scsi/hpsa.c hpsa_print_cmd(h, "unabortable", cp);
h 2976 drivers/scsi/hpsa.c hpsa_print_cmd(h, "controller lockup detected", cp);
h 2979 drivers/scsi/hpsa.c hpsa_print_cmd(h, "unknown status", cp);
h 2985 drivers/scsi/hpsa.c static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
h 2992 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 2993 drivers/scsi/hpsa.c if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
h 2998 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
h 3004 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c);
h 3008 drivers/scsi/hpsa.c cmd_free(h, c);
h 3012 drivers/scsi/hpsa.c static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
h 3023 drivers/scsi/hpsa.c rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
h 3036 drivers/scsi/hpsa.c static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
h 3044 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 3046 drivers/scsi/hpsa.c if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
h 3051 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
h 3057 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c);
h 3061 drivers/scsi/hpsa.c cmd_free(h, c);
h 3065 drivers/scsi/hpsa.c static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
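
hpsa_scsi_do_simple_cmd_with_retry(), indexed just above, is a small loop: submit, then re-issue while the command nominally succeeded but check_for_unit_attention() or check_for_busy() reports a transient condition, up to a retry cap, and unmap the DMA buffer exactly once at the end. Its skeleton, with issue_once(), is_transient() and RETRY_LIMIT as placeholders rather than hpsa symbols:

    #include <linux/types.h>

    struct cmd;                              /* opaque command handle */
    int issue_once(struct cmd *c);           /* placeholder: submit and wait */
    bool is_transient(struct cmd *c);        /* placeholder: UA or BUSY sense */
    void unmap_buffer(struct cmd *c);        /* placeholder: DMA unmap */
    #define RETRY_LIMIT 3                    /* illustrative cap */

    static int run_with_retry(struct cmd *c)
    {
        int retries = 0;
        int rc;

        do {
            rc = issue_once(c);
            retries++;
            /* rc == 0 means the transport delivered the command; the
             * sense data may still say "try again", hence the loop. */
        } while (rc == 0 && is_transient(c) && retries <= RETRY_LIMIT);

        unmap_buffer(c);                     /* once, after the last attempt */
        return rc;
    }
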
h 3072 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 3076 drivers/scsi/hpsa.c (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
h 3077 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
h 3079 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Failed to send reset command\n");
h 3086 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c);
h 3090 drivers/scsi/hpsa.c cmd_free(h, c);
h 3094 drivers/scsi/hpsa.c static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
h 3100 drivers/scsi/hpsa.c struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
h 3145 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
h 3153 drivers/scsi/hpsa.c static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
h 3159 drivers/scsi/hpsa.c if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
h 3160 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
h 3164 drivers/scsi/hpsa.c rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
h 3168 drivers/scsi/hpsa.c wait_event(h->event_sync_wait_queue,
h 3170 drivers/scsi/hpsa.c lockup_detected(h));
h 3173 drivers/scsi/hpsa.c if (unlikely(lockup_detected(h))) {
h 3174 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 3180 drivers/scsi/hpsa.c rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
h 3182 drivers/scsi/hpsa.c mutex_unlock(&h->reset_mutex);
h 3186 drivers/scsi/hpsa.c static void hpsa_get_raid_level(struct ctlr_info *h,
h 3197 drivers/scsi/hpsa.c if (!hpsa_vpd_page_supported(h, scsi3addr,
h 3201 drivers/scsi/hpsa.c rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
h 3215 drivers/scsi/hpsa.c static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
h 3226 drivers/scsi/hpsa.c if (h->raid_offload_debug < 2)
h 3229 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "structure_size = %u\n",
h 3231 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
h 3233 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
h 3235 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
h 3237 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
h 3239 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "strip_size = %u\n",
h 3241 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
h 3243 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
h 3245 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
h 3247 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
h 3249 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "row_cnt = %u\n",
h 3251 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "layout_map_count = %u\n",
h 3253 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "flags = 0x%x\n",
h 3255 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "encryption = %s\n",
h 3258 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "dekindex = %u\n",
h 3262 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "Map%u:\n", map);
h 3265 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, " Row%u:\n", row);
h 3269 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 3276 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 3284 drivers/scsi/hpsa.c static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
h 3291 drivers/scsi/hpsa.c static int hpsa_get_raid_map(struct ctlr_info *h,
h 3298 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 3300 drivers/scsi/hpsa.c if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
h 3303 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
h 3304 drivers/scsi/hpsa.c cmd_free(h, c);
h 3307 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
h 3313 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c);
h 3317 drivers/scsi/hpsa.c cmd_free(h, c);
h 3322 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
h 3325 drivers/scsi/hpsa.c hpsa_debug_map_buff(h, rc, &this_device->raid_map);
h 3328 drivers/scsi/hpsa.c cmd_free(h, c);
h 3332 drivers/scsi/hpsa.c static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
h 3340 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 3342 drivers/scsi/hpsa.c rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
h 3350 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
h 3356 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c);
h 3360 drivers/scsi/hpsa.c cmd_free(h, c);
h 3364 drivers/scsi/hpsa.c static int hpsa_bmic_id_controller(struct ctlr_info *h,
h 3371 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 3373 drivers/scsi/hpsa.c rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
h 3378 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
h 3384 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c);
h 3388 drivers/scsi/hpsa.c cmd_free(h, c);
h 3392 drivers/scsi/hpsa.c static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
h 3400 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 3401 drivers/scsi/hpsa.c rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
h 3409 drivers/scsi/hpsa.c hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
h 3413 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c);
h 3417 drivers/scsi/hpsa.c cmd_free(h, c);
h 3428 drivers/scsi/hpsa.c static void hpsa_get_enclosure_info(struct ctlr_info *h,
h 3442 drivers/scsi/hpsa.c hpsa_get_enclosure_logical_identifier(h, scsi3addr);
h 3464 drivers/scsi/hpsa.c rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
h 3467 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
h 3472 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 3474 drivers/scsi/hpsa.c rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
h 3485 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
h 3506 drivers/scsi/hpsa.c cmd_free(h, c);
h 3509 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
h 3513 drivers/scsi/hpsa.c static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
h 3525 drivers/scsi/hpsa.c if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
h 3526 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
h 3543 drivers/scsi/hpsa.c static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
h 3556 drivers/scsi/hpsa.c rc = hpsa_bmic_sense_subsystem_information(h,
h 3560 drivers/scsi/hpsa.c h->sas_address = sa;
h 3565 drivers/scsi/hpsa.c sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
h 3570 drivers/scsi/hpsa.c static void hpsa_ext_ctrl_present(struct ctlr_info *h,
h 3576 drivers/scsi/hpsa.c if (h->discovery_polling)
h 3585 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 3587 drivers/scsi/hpsa.c hpsa_disable_rld_caching(h);
h 3588 drivers/scsi/hpsa.c h->discovery_polling = 1;
h 3595 drivers/scsi/hpsa.c static bool hpsa_vpd_page_supported(struct ctlr_info *h,
h 3608 drivers/scsi/hpsa.c rc = hpsa_scsi_do_inquiry(h, scsi3addr,
h 3620 drivers/scsi/hpsa.c rc = hpsa_scsi_do_inquiry(h, scsi3addr,
h 3645 drivers/scsi/hpsa.c static void hpsa_get_ioaccel_status(struct ctlr_info *h,
h 3659 drivers/scsi/hpsa.c if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
h 3661 drivers/scsi/hpsa.c rc = hpsa_scsi_do_inquiry(h, scsi3addr,
h 3675 drivers/scsi/hpsa.c if (hpsa_get_raid_map(h, scsi3addr, this_device))
h 3685 drivers/scsi/hpsa.c static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
h 3692 drivers/scsi/hpsa.c if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
h 3699 drivers/scsi/hpsa.c rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
h 3712 drivers/scsi/hpsa.c static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
h 3721 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 3725 drivers/scsi/hpsa.c if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
h 3732 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
h 3739 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c);
h 3745 drivers/scsi/hpsa.c if (!h->legacy_board) {
h 3746 drivers/scsi/hpsa.c dev_err(&h->pdev->dev,
h 3756 drivers/scsi/hpsa.c cmd_free(h, c);
h 3760 drivers/scsi/hpsa.c static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
h 3766 drivers/scsi/hpsa.c rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
h 3776 drivers/scsi/hpsa.c rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
h 3791 drivers/scsi/hpsa.c static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
h 3794 drivers/scsi/hpsa.c return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
h 3806 drivers/scsi/hpsa.c static int hpsa_get_volume_status(struct ctlr_info *h,
h 3819 drivers/scsi/hpsa.c if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
h 3823 drivers/scsi/hpsa.c rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
h 3830 drivers/scsi/hpsa.c rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
h 3850 drivers/scsi/hpsa.c static unsigned char hpsa_volume_offline(struct ctlr_info *h,
h 3864 drivers/scsi/hpsa.c c = cmd_alloc(h);
h 3866 drivers/scsi/hpsa.c (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
h 3867 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
h 3870 drivers/scsi/hpsa.c cmd_free(h, c);
h 3881 drivers/scsi/hpsa.c cmd_free(h, c);
h 3884 drivers/scsi/hpsa.c ldstat = hpsa_get_volume_status(h, scsi3addr);
h 3913 drivers/scsi/hpsa.c static int hpsa_update_device_info(struct ctlr_info *h,
h 3934 drivers/scsi/hpsa.c if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
h 3936 drivers/scsi/hpsa.c dev_err(&h->pdev->dev,
h 3955 drivers/scsi/hpsa.c if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
h 3957 drivers/scsi/hpsa.c dev_err(&h->pdev->dev,
h 3959 drivers/scsi/hpsa.c h->ctlr, __func__,
h 3960 drivers/scsi/hpsa.c h->scsi_host->host_no,
h 3974 drivers/scsi/hpsa.c hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
h 3975 drivers/scsi/hpsa.c if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
h 3976 drivers/scsi/hpsa.c hpsa_get_ioaccel_status(h, scsi3addr, this_device);
h 3977 drivers/scsi/hpsa.c volume_offline = hpsa_volume_offline(h, scsi3addr);
h 3979 drivers/scsi/hpsa.c h->legacy_board) {
h 3983 drivers/scsi/hpsa.c dev_info(&h->pdev->dev,
h 3991 drivers/scsi/hpsa.c dev_err(&h->pdev->dev,
h 4003 drivers/scsi/hpsa.c this_device->queue_depth = h->nr_cmds;
h 4032 drivers/scsi/hpsa.c static void figure_bus_target_lun(struct ctlr_info *h,
h 4063 drivers/scsi/hpsa.c static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
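
Nearly every query helper in this stretch (hpsa_scsi_do_inquiry, the BMIC identify calls, report LUNs, get RAID map) follows the same five-step lifecycle: cmd_alloc(), fill_cmd(), run with retry, interpret the error info on a bad completion, cmd_free(). The shape, with every name below a placeholder for the corresponding hpsa helper:

    #include <stddef.h>

    struct ctlr;
    struct cmd;
    struct cmd *cmd_get(struct ctlr *h);                    /* cmd_alloc()   */
    void cmd_put(struct ctlr *h, struct cmd *c);            /* cmd_free()    */
    int build(struct cmd *c, int op, void *buf, size_t n);  /* fill_cmd()    */
    int run_retry(struct ctlr *h, struct cmd *c);           /* retry wrapper */
    int completed_ok(struct cmd *c);
    void report_error(struct ctlr *h, struct cmd *c);

    static int query(struct ctlr *h, int op, void *buf, size_t n)
    {
        struct cmd *c = cmd_get(h);         /* reserved pool: never NULL */
        int rc;

        rc = build(c, op, buf, n);
        if (rc)
            goto out;
        rc = run_retry(h, c);
        if (rc == 0 && !completed_ok(c)) {
            report_error(h, c);
            rc = -1;
        }
    out:
        cmd_put(h, c);                      /* always pairs with cmd_get() */
        return rc;
    }
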
h 4090 drivers/scsi/hpsa.c static int hpsa_gather_lun_info(struct ctlr_info *h,
h 4094 drivers/scsi/hpsa.c if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
h 4095 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
h 4100 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
h 4104 drivers/scsi/hpsa.c if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
h 4105 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
h 4111 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 4118 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 4127 drivers/scsi/hpsa.c static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
h 4155 drivers/scsi/hpsa.c static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
h 4169 drivers/scsi/hpsa.c rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
h 4210 drivers/scsi/hpsa.c static int hpsa_set_local_logical_count(struct ctlr_info *h,
h 4217 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
h 4222 drivers/scsi/hpsa.c rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
h 4234 drivers/scsi/hpsa.c static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
h 4244 drivers/scsi/hpsa.c rc = hpsa_bmic_id_physical_device(h,
h 4261 drivers/scsi/hpsa.c static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
h 4293 drivers/scsi/hpsa.c if (hpsa_is_disk_spare(h, lunaddrbytes))
h 4299 drivers/scsi/hpsa.c static void hpsa_update_scsi_devices(struct ctlr_info *h)
h 4335 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "out of memory\n");
h 4340 drivers/scsi/hpsa.c h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
h 4342 drivers/scsi/hpsa.c if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
h 4344 drivers/scsi/hpsa.c h->drv_req_rescan = 1;
h 4349 drivers/scsi/hpsa.c if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
h 4350 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 4361 drivers/scsi/hpsa.c hpsa_ext_ctrl_present(h, physdev_list);
h 4366 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
h 4374 drivers/scsi/hpsa.c h->drv_req_rescan = 1;
h 4380 drivers/scsi/hpsa.c if (is_scsi_rev_5(h))
h 4398 drivers/scsi/hpsa.c lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
h 4403 drivers/scsi/hpsa.c figure_external_status(h, raid_ctlr_position, i,
h 4410 drivers/scsi/hpsa.c skip_device = hpsa_skip_device(h, lunaddrbytes,
h 4417 drivers/scsi/hpsa.c rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
h 4420 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 4422 drivers/scsi/hpsa.c h->drv_req_rescan = 1;
h 4426 drivers/scsi/hpsa.c h->drv_req_rescan = 1;
h 4430 drivers/scsi/hpsa.c figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
h 4450 drivers/scsi/hpsa.c hpsa_get_sas_address(h, lunaddrbytes, this_device);
h 4470 drivers/scsi/hpsa.c hpsa_get_ioaccel_drive_info(h, this_device,
h 4483 drivers/scsi/hpsa.c hpsa_get_enclosure_info(h, lunaddrbytes,
h 4505 drivers/scsi/hpsa.c if (h->sas_host == NULL) {
h 4508 drivers/scsi/hpsa.c rc = hpsa_add_sas_host(h);
h 4510 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 4516 drivers/scsi/hpsa.c adjust_hpsa_scsi_table(h, currentsd, ncurrent);
h 4544 drivers/scsi/hpsa.c static int hpsa_scatter_gather(struct ctlr_info *h,
h 4552 drivers/scsi/hpsa.c BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
h 4569 drivers/scsi/hpsa.c chained = use_sg > h->max_cmd_sg_entries;
h 4570 drivers/scsi/hpsa.c sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
h 4584 drivers/scsi/hpsa.c curr_sg = h->cmd_sg_list[cp->cmdindex];
h 4595 drivers/scsi/hpsa.c if (use_sg + chained > h->maxSG)
h 4596 drivers/scsi/hpsa.c h->maxSG = use_sg + chained;
h 4599 drivers/scsi/hpsa.c cp->Header.SGList = h->max_cmd_sg_entries;
h 4601 drivers/scsi/hpsa.c if (hpsa_map_sg_chain_block(h, cp)) {
h 4615 drivers/scsi/hpsa.c static inline void warn_zero_length_transfer(struct ctlr_info *h,
h 4619 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev,
h 4701 drivers/scsi/hpsa.c static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
h 4706 drivers/scsi/hpsa.c struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
h 4716 drivers/scsi/hpsa.c if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
h 4724 drivers/scsi/hpsa.c warn_zero_length_transfer(h, cdb, cdb_len, __func__);
h 4737 drivers/scsi/hpsa.c c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
h 4771 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "unknown data direction: %d\n",
h 4790 drivers/scsi/hpsa.c enqueue_cmd_and_start_io(h, c);
h 4798 drivers/scsi/hpsa.c static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
h 4812 drivers/scsi/hpsa.c return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
h 4819 drivers/scsi/hpsa.c static void set_encrypt_ioaccel2(struct ctlr_info *h,
h 4860 drivers/scsi/hpsa.c dev_err(&h->pdev->dev,
h 4875 drivers/scsi/hpsa.c static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
h 4880 drivers/scsi/hpsa.c struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
h 4894 drivers/scsi/hpsa.c BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
h 4897 drivers/scsi/hpsa.c warn_zero_length_transfer(h, cdb, cdb_len, __func__);
h 4909 drivers/scsi/hpsa.c c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
h 4924 drivers/scsi/hpsa.c if (use_sg > h->ioaccel_maxsg) {
h 4926 drivers/scsi/hpsa.c h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
h 4934 drivers/scsi/hpsa.c curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
h 4968 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "unknown data direction: %d\n",
h 4979 drivers/scsi/hpsa.c set_encrypt_ioaccel2(h, c, cp);
h 4991 drivers/scsi/hpsa.c if (use_sg > h->ioaccel_maxsg) {
h 4994 drivers/scsi/hpsa.c if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
h 5007 drivers/scsi/hpsa.c enqueue_cmd_and_start_io(h, c);
h 5014 drivers/scsi/hpsa.c static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
h 5033 drivers/scsi/hpsa.c if (h->transMethod & CFGTBL_Trans_io_accel1)
h 5034 drivers/scsi/hpsa.c return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
h 5038 drivers/scsi/hpsa.c return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
h 5072 drivers/scsi/hpsa.c static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
h 5405 drivers/scsi/hpsa.c return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
h 5415 drivers/scsi/hpsa.c static int hpsa_ciss_submit(struct ctlr_info *h,
h 5464 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "unknown data direction: %d\n",
h 5470 drivers/scsi/hpsa.c if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
h 5471 drivers/scsi/hpsa.c hpsa_cmd_resolve_and_free(h, c);
h 5476 drivers/scsi/hpsa.c hpsa_cmd_resolve_and_free(h, c);
h 5482 drivers/scsi/hpsa.c enqueue_cmd_and_start_io(h, c);
h 5487 drivers/scsi/hpsa.c static void hpsa_cmd_init(struct ctlr_info *h, int index,
h 5495 drivers/scsi/hpsa.c cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
h 5496 drivers/scsi/hpsa.c c->err_info = h->errinfo_pool + index;
h 5498 drivers/scsi/hpsa.c err_dma_handle = h->errinfo_pool_dhandle
h 5504 drivers/scsi/hpsa.c c->h = h;
h 5508 drivers/scsi/hpsa.c static void hpsa_preinitialize_commands(struct ctlr_info *h)
h 5512 drivers/scsi/hpsa.c for (i = 0; i < h->nr_cmds; i++) {
h 5513 drivers/scsi/hpsa.c struct CommandList *c = h->cmd_pool + i;
h 5515 drivers/scsi/hpsa.c hpsa_cmd_init(h, i, c);
h 5520 drivers/scsi/hpsa.c static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
h 5523 drivers/scsi/hpsa.c dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
h 5532 drivers/scsi/hpsa.c static int hpsa_ioaccel_submit(struct ctlr_info *h,
h 5550 drivers/scsi/hpsa.c hpsa_cmd_init(h, c->cmdindex, c);
h 5554 drivers/scsi/hpsa.c rc = hpsa_scsi_ioaccel_raid_map(h, c);
h 5558 drivers/scsi/hpsa.c hpsa_cmd_init(h, c->cmdindex, c);
h 5562 drivers/scsi/hpsa.c rc = hpsa_scsi_ioaccel_direct_map(h, c);
h 5579 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(c->h, c, cmd);
h 5584 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(c->h, c, cmd);
h 5588 drivers/scsi/hpsa.c struct ctlr_info *h = c->h;
h 5589 drivers/scsi/hpsa.c struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
h 5594 drivers/scsi/hpsa.c rc = hpsa_ioaccel_submit(h, c, cmd);
h 5604 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, c, cmd);
h 5609 drivers/scsi/hpsa.c hpsa_cmd_partial_init(c->h, c->cmdindex, c);
h 5610 drivers/scsi/hpsa.c if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
h 5627 drivers/scsi/hpsa.c struct ctlr_info *h;
h 5633 drivers/scsi/hpsa.c h = sdev_to_hba(cmd->device);
h 5650 drivers/scsi/hpsa.c if (unlikely(lockup_detected(h))) {
h 5659 drivers/scsi/hpsa.c c = cmd_tagged_alloc(h, cmd);
h 5675 drivers/scsi/hpsa.c h->acciopath_status)) {
h 5676 drivers/scsi/hpsa.c rc = hpsa_ioaccel_submit(h, c, cmd);
h 5680 drivers/scsi/hpsa.c hpsa_cmd_resolve_and_free(h, c);
h 5684 drivers/scsi/hpsa.c return hpsa_ciss_submit(h, c, cmd, dev);
h 5687 drivers/scsi/hpsa.c static void hpsa_scan_complete(struct ctlr_info *h)
h 5691 drivers/scsi/hpsa.c spin_lock_irqsave(&h->scan_lock, flags);
h 5692 drivers/scsi/hpsa.c h->scan_finished = 1;
h 5693 drivers/scsi/hpsa.c wake_up(&h->scan_wait_queue);
h 5694 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->scan_lock, flags);
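
hpsa_scan_start() and hpsa_scan_complete(), whose locking lines appear above and just below, serialize rescans with flags under scan_lock plus a wait queue: a caller that finds a scan in flight sleeps on scan_wait_queue until scan_finished, and the completion path flips the flag and wakes everyone. The bare mechanism, using a hypothetical scan_state container rather than ctlr_info:

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct scan_state {                     /* hypothetical container */
        spinlock_t lock;
        int finished;
        wait_queue_head_t wq;
    };

    static void scan_state_init(struct scan_state *s)
    {
        spin_lock_init(&s->lock);
        s->finished = 1;                    /* no scan in flight initially */
        init_waitqueue_head(&s->wq);
    }

    static void scan_begin(struct scan_state *s)
    {
        unsigned long flags;

        spin_lock_irqsave(&s->lock, flags);
        while (!s->finished) {              /* another scan is running */
            spin_unlock_irqrestore(&s->lock, flags);
            wait_event(s->wq, s->finished);
            spin_lock_irqsave(&s->lock, flags);
        }
        s->finished = 0;                    /* mark scan as in progress */
        spin_unlock_irqrestore(&s->lock, flags);
    }

    static void scan_end(struct scan_state *s)
    {
        unsigned long flags;

        spin_lock_irqsave(&s->lock, flags);
        s->finished = 1;
        wake_up(&s->wq);
        spin_unlock_irqrestore(&s->lock, flags);
    }
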
spin_unlock_irqrestore(&h->scan_lock, flags); h 5699 drivers/scsi/hpsa.c struct ctlr_info *h = shost_to_hba(sh); h 5708 drivers/scsi/hpsa.c if (unlikely(lockup_detected(h))) h 5709 drivers/scsi/hpsa.c return hpsa_scan_complete(h); h 5714 drivers/scsi/hpsa.c spin_lock_irqsave(&h->scan_lock, flags); h 5715 drivers/scsi/hpsa.c if (h->scan_waiting) { h 5716 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->scan_lock, flags); h 5720 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->scan_lock, flags); h 5724 drivers/scsi/hpsa.c spin_lock_irqsave(&h->scan_lock, flags); h 5725 drivers/scsi/hpsa.c if (h->scan_finished) h 5727 drivers/scsi/hpsa.c h->scan_waiting = 1; h 5728 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->scan_lock, flags); h 5729 drivers/scsi/hpsa.c wait_event(h->scan_wait_queue, h->scan_finished); h 5736 drivers/scsi/hpsa.c h->scan_finished = 0; /* mark scan as in progress */ h 5737 drivers/scsi/hpsa.c h->scan_waiting = 0; h 5738 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->scan_lock, flags); h 5740 drivers/scsi/hpsa.c if (unlikely(lockup_detected(h))) h 5741 drivers/scsi/hpsa.c return hpsa_scan_complete(h); h 5746 drivers/scsi/hpsa.c spin_lock_irqsave(&h->reset_lock, flags); h 5747 drivers/scsi/hpsa.c if (h->reset_in_progress) { h 5748 drivers/scsi/hpsa.c h->drv_req_rescan = 1; h 5749 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->reset_lock, flags); h 5750 drivers/scsi/hpsa.c hpsa_scan_complete(h); h 5753 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->reset_lock, flags); h 5755 drivers/scsi/hpsa.c hpsa_update_scsi_devices(h); h 5757 drivers/scsi/hpsa.c hpsa_scan_complete(h); h 5778 drivers/scsi/hpsa.c struct ctlr_info *h = shost_to_hba(sh); h 5782 drivers/scsi/hpsa.c spin_lock_irqsave(&h->scan_lock, flags); h 5783 drivers/scsi/hpsa.c finished = h->scan_finished; h 5784 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->scan_lock, flags); h 5788 drivers/scsi/hpsa.c static int hpsa_scsi_host_alloc(struct ctlr_info *h) h 5792 drivers/scsi/hpsa.c sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); h 5794 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); h 5805 drivers/scsi/hpsa.c sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; h 5807 drivers/scsi/hpsa.c sh->sg_tablesize = h->maxsgentries; h 5809 drivers/scsi/hpsa.c sh->hostdata[0] = (unsigned long) h; h 5810 drivers/scsi/hpsa.c sh->irq = pci_irq_vector(h->pdev, 0); h 5813 drivers/scsi/hpsa.c h->scsi_host = sh; h 5817 drivers/scsi/hpsa.c static int hpsa_scsi_add_host(struct ctlr_info *h) h 5821 drivers/scsi/hpsa.c rv = scsi_add_host(h->scsi_host, &h->pdev->dev); h 5823 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "scsi_add_host failed\n"); h 5826 drivers/scsi/hpsa.c scsi_scan_host(h->scsi_host); h 5851 drivers/scsi/hpsa.c static int hpsa_send_test_unit_ready(struct ctlr_info *h, h 5858 drivers/scsi/hpsa.c (void) fill_cmd(c, TEST_UNIT_READY, h, h 5860 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); h 5887 drivers/scsi/hpsa.c static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, h 5904 drivers/scsi/hpsa.c rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); h 5912 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, h 5920 drivers/scsi/hpsa.c static int wait_for_device_to_become_ready(struct ctlr_info *h, h 5930 drivers/scsi/hpsa.c c = cmd_alloc(h); h 5939 drivers/scsi/hpsa.c last_queue = h->nreply_queues - 1; h 5946 drivers/scsi/hpsa.c rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); h 5952 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "giving up on device.\n"); h 5954 
drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "device is ready.\n"); h 5956 drivers/scsi/hpsa.c cmd_free(h, c); h 5967 drivers/scsi/hpsa.c struct ctlr_info *h; h 5974 drivers/scsi/hpsa.c h = sdev_to_hba(scsicmd->device); h 5975 drivers/scsi/hpsa.c if (h == NULL) /* paranoia */ h 5978 drivers/scsi/hpsa.c spin_lock_irqsave(&h->reset_lock, flags); h 5979 drivers/scsi/hpsa.c h->reset_in_progress = 1; h 5980 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->reset_lock, flags); h 5982 drivers/scsi/hpsa.c if (lockup_detected(h)) { h 5989 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); h 6000 drivers/scsi/hpsa.c if (lockup_detected(h)) { h 6004 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); h 6010 drivers/scsi/hpsa.c if (detect_controller_lockup(h)) { h 6014 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); h 6032 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); h 6046 drivers/scsi/hpsa.c rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE); h 6055 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); h 6058 drivers/scsi/hpsa.c spin_lock_irqsave(&h->reset_lock, flags); h 6059 drivers/scsi/hpsa.c h->reset_in_progress = 0; h 6062 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->reset_lock, flags); h 6072 drivers/scsi/hpsa.c static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, h 6076 drivers/scsi/hpsa.c struct CommandList *c = h->cmd_pool + idx; h 6078 drivers/scsi/hpsa.c if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { h 6079 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", h 6080 drivers/scsi/hpsa.c idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); h 6094 drivers/scsi/hpsa.c if (idx != h->last_collision_tag) { /* Print once per tag */ h 6095 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, h 6099 drivers/scsi/hpsa.c h->last_collision_tag = idx; h 6106 drivers/scsi/hpsa.c hpsa_cmd_partial_init(h, idx, c); h 6110 drivers/scsi/hpsa.c static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) h 6128 drivers/scsi/hpsa.c static struct CommandList *cmd_alloc(struct ctlr_info *h) h 6154 drivers/scsi/hpsa.c i = find_next_zero_bit(h->cmd_pool_bits, h 6161 drivers/scsi/hpsa.c c = h->cmd_pool + i; h 6164 drivers/scsi/hpsa.c cmd_free(h, c); /* already in use */ h 6169 drivers/scsi/hpsa.c h->cmd_pool_bits + (i / BITS_PER_LONG)); h 6172 drivers/scsi/hpsa.c hpsa_cmd_partial_init(h, i, c); h 6183 drivers/scsi/hpsa.c static void cmd_free(struct ctlr_info *h, struct CommandList *c) h 6188 drivers/scsi/hpsa.c i = c - h->cmd_pool; h 6190 drivers/scsi/hpsa.c h->cmd_pool_bits + (i / BITS_PER_LONG)); h 6302 drivers/scsi/hpsa.c static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) h 6308 drivers/scsi/hpsa.c pciinfo.domain = pci_domain_nr(h->pdev->bus); h 6309 drivers/scsi/hpsa.c pciinfo.bus = h->pdev->bus->number; h 6310 drivers/scsi/hpsa.c pciinfo.dev_fn = h->pdev->devfn; h 6311 drivers/scsi/hpsa.c pciinfo.board_id = h->board_id; h 6317 drivers/scsi/hpsa.c static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) h 6326 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "driver version string '%s' " h 6340 drivers/scsi/hpsa.c static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) h 6373 drivers/scsi/hpsa.c c = cmd_alloc(h); h 6395 drivers/scsi/hpsa.c temp64 = dma_map_single(&h->pdev->dev, buff, h 6397 drivers/scsi/hpsa.c if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { h 6407 drivers/scsi/hpsa.c rc = 
hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, h 6410 drivers/scsi/hpsa.c hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL); h 6411 drivers/scsi/hpsa.c check_ioctl_unit_attention(h, c); h 6433 drivers/scsi/hpsa.c cmd_free(h, c); h 6439 drivers/scsi/hpsa.c static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) h 6506 drivers/scsi/hpsa.c c = cmd_alloc(h); h 6518 drivers/scsi/hpsa.c temp64 = dma_map_single(&h->pdev->dev, buff[i], h 6520 drivers/scsi/hpsa.c if (dma_mapping_error(&h->pdev->dev, h 6524 drivers/scsi/hpsa.c hpsa_pci_unmap(h->pdev, c, i, h 6535 drivers/scsi/hpsa.c status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, h 6538 drivers/scsi/hpsa.c hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL); h 6539 drivers/scsi/hpsa.c check_ioctl_unit_attention(h, c); h 6566 drivers/scsi/hpsa.c cmd_free(h, c); h 6580 drivers/scsi/hpsa.c static void check_ioctl_unit_attention(struct ctlr_info *h, h 6585 drivers/scsi/hpsa.c (void) check_for_unit_attention(h, c); h 6594 drivers/scsi/hpsa.c struct ctlr_info *h; h 6598 drivers/scsi/hpsa.c h = sdev_to_hba(dev); h 6604 drivers/scsi/hpsa.c hpsa_scan_start(h->scsi_host); h 6607 drivers/scsi/hpsa.c return hpsa_getpciinfo_ioctl(h, argp); h 6609 drivers/scsi/hpsa.c return hpsa_getdrivver_ioctl(h, argp); h 6611 drivers/scsi/hpsa.c if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) h 6613 drivers/scsi/hpsa.c rc = hpsa_passthru_ioctl(h, argp); h 6614 drivers/scsi/hpsa.c atomic_inc(&h->passthru_cmds_avail); h 6617 drivers/scsi/hpsa.c if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) h 6619 drivers/scsi/hpsa.c rc = hpsa_big_passthru_ioctl(h, argp); h 6620 drivers/scsi/hpsa.c atomic_inc(&h->passthru_cmds_avail); h 6627 drivers/scsi/hpsa.c static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type) h 6631 drivers/scsi/hpsa.c c = cmd_alloc(h); h 6634 drivers/scsi/hpsa.c (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, h 6638 drivers/scsi/hpsa.c enqueue_cmd_and_start_io(h, c); h 6646 drivers/scsi/hpsa.c static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, h 6809 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); h 6845 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "unknown message type %d\n", h 6850 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); h 6867 drivers/scsi/hpsa.c if (hpsa_map_one(h->pdev, c, buff, size, dir)) h 6885 drivers/scsi/hpsa.c static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) h 6887 drivers/scsi/hpsa.c return h->access.command_completed(h, q); h 6890 drivers/scsi/hpsa.c static inline bool interrupt_pending(struct ctlr_info *h) h 6892 drivers/scsi/hpsa.c return h->access.intr_pending(h); h 6895 drivers/scsi/hpsa.c static inline long interrupt_not_for_us(struct ctlr_info *h) h 6897 drivers/scsi/hpsa.c return (h->access.intr_pending(h) == 0) || h 6898 drivers/scsi/hpsa.c (h->interrupts_enabled == 0); h 6901 drivers/scsi/hpsa.c static inline int bad_tag(struct ctlr_info *h, u32 tag_index, h 6904 drivers/scsi/hpsa.c if (unlikely(tag_index >= h->nr_cmds)) { h 6905 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); h 6913 drivers/scsi/hpsa.c dial_up_lockup_detection_on_fw_flash_complete(c->h, c); h 6922 drivers/scsi/hpsa.c static inline void process_indexed_cmd(struct ctlr_info *h, h 6929 drivers/scsi/hpsa.c if (!bad_tag(h, tag_index, raw_tag)) { h 6930 drivers/scsi/hpsa.c c = h->cmd_pool + tag_index; h 6940 drivers/scsi/hpsa.c static int 
ignore_bogus_interrupt(struct ctlr_info *h) h 6945 drivers/scsi/hpsa.c if (likely(h->interrupts_enabled)) h 6948 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " h 6966 drivers/scsi/hpsa.c struct ctlr_info *h = queue_to_hba(queue); h 6970 drivers/scsi/hpsa.c if (ignore_bogus_interrupt(h)) h 6973 drivers/scsi/hpsa.c if (interrupt_not_for_us(h)) h 6975 drivers/scsi/hpsa.c h->last_intr_timestamp = get_jiffies_64(); h 6976 drivers/scsi/hpsa.c while (interrupt_pending(h)) { h 6977 drivers/scsi/hpsa.c raw_tag = get_next_completion(h, q); h 6979 drivers/scsi/hpsa.c raw_tag = next_command(h, q); h 6986 drivers/scsi/hpsa.c struct ctlr_info *h = queue_to_hba(queue); h 6990 drivers/scsi/hpsa.c if (ignore_bogus_interrupt(h)) h 6993 drivers/scsi/hpsa.c h->last_intr_timestamp = get_jiffies_64(); h 6994 drivers/scsi/hpsa.c raw_tag = get_next_completion(h, q); h 6996 drivers/scsi/hpsa.c raw_tag = next_command(h, q); h 7002 drivers/scsi/hpsa.c struct ctlr_info *h = queue_to_hba((u8 *) queue); h 7006 drivers/scsi/hpsa.c if (interrupt_not_for_us(h)) h 7008 drivers/scsi/hpsa.c h->last_intr_timestamp = get_jiffies_64(); h 7009 drivers/scsi/hpsa.c while (interrupt_pending(h)) { h 7010 drivers/scsi/hpsa.c raw_tag = get_next_completion(h, q); h 7012 drivers/scsi/hpsa.c process_indexed_cmd(h, raw_tag); h 7013 drivers/scsi/hpsa.c raw_tag = next_command(h, q); h 7021 drivers/scsi/hpsa.c struct ctlr_info *h = queue_to_hba(queue); h 7025 drivers/scsi/hpsa.c h->last_intr_timestamp = get_jiffies_64(); h 7026 drivers/scsi/hpsa.c raw_tag = get_next_completion(h, q); h 7028 drivers/scsi/hpsa.c process_indexed_cmd(h, raw_tag); h 7029 drivers/scsi/hpsa.c raw_tag = next_command(h, q); h 7432 drivers/scsi/hpsa.c static void hpsa_disable_interrupt_mode(struct ctlr_info *h) h 7434 drivers/scsi/hpsa.c pci_free_irq_vectors(h->pdev); h 7435 drivers/scsi/hpsa.c h->msix_vectors = 0; h 7438 drivers/scsi/hpsa.c static void hpsa_setup_reply_map(struct ctlr_info *h) h 7443 drivers/scsi/hpsa.c for (queue = 0; queue < h->msix_vectors; queue++) { h 7444 drivers/scsi/hpsa.c mask = pci_irq_get_affinity(h->pdev, queue); h 7449 drivers/scsi/hpsa.c h->reply_map[cpu] = queue; h 7455 drivers/scsi/hpsa.c h->reply_map[cpu] = 0; h 7461 drivers/scsi/hpsa.c static int hpsa_interrupt_mode(struct ctlr_info *h) h 7467 drivers/scsi/hpsa.c switch (h->board_id) { h 7474 drivers/scsi/hpsa.c ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES, h 7477 drivers/scsi/hpsa.c h->msix_vectors = ret; h 7485 drivers/scsi/hpsa.c ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags); h 7580 drivers/scsi/hpsa.c static void hpsa_free_cfgtables(struct ctlr_info *h) h 7582 drivers/scsi/hpsa.c if (h->transtable) { h 7583 drivers/scsi/hpsa.c iounmap(h->transtable); h 7584 drivers/scsi/hpsa.c h->transtable = NULL; h 7586 drivers/scsi/hpsa.c if (h->cfgtable) { h 7587 drivers/scsi/hpsa.c iounmap(h->cfgtable); h 7588 drivers/scsi/hpsa.c h->cfgtable = NULL; h 7595 drivers/scsi/hpsa.c static int hpsa_find_cfgtables(struct ctlr_info *h) h 7603 drivers/scsi/hpsa.c rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, h 7607 drivers/scsi/hpsa.c h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, h 7608 drivers/scsi/hpsa.c cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); h 7609 drivers/scsi/hpsa.c if (!h->cfgtable) { h 7610 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); h 7613 drivers/scsi/hpsa.c rc = write_driver_ver_to_cfgtable(h->cfgtable); h 7617 drivers/scsi/hpsa.c trans_offset = 
readl(&h->cfgtable->TransMethodOffset); h 7618 drivers/scsi/hpsa.c h->transtable = remap_pci_mem(pci_resource_start(h->pdev, h 7620 drivers/scsi/hpsa.c sizeof(*h->transtable)); h 7621 drivers/scsi/hpsa.c if (!h->transtable) { h 7622 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); h 7623 drivers/scsi/hpsa.c hpsa_free_cfgtables(h); h 7629 drivers/scsi/hpsa.c static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) h 7634 drivers/scsi/hpsa.c h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); h 7637 drivers/scsi/hpsa.c if (reset_devices && h->max_commands > 32) h 7638 drivers/scsi/hpsa.c h->max_commands = 32; h 7640 drivers/scsi/hpsa.c if (h->max_commands < MIN_MAX_COMMANDS) { h 7641 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, h 7643 drivers/scsi/hpsa.c h->max_commands, h 7645 drivers/scsi/hpsa.c h->max_commands = MIN_MAX_COMMANDS; h 7653 drivers/scsi/hpsa.c static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) h 7655 drivers/scsi/hpsa.c return h->maxsgentries > 512; h 7662 drivers/scsi/hpsa.c static void hpsa_find_board_params(struct ctlr_info *h) h 7664 drivers/scsi/hpsa.c hpsa_get_max_perf_mode_cmds(h); h 7665 drivers/scsi/hpsa.c h->nr_cmds = h->max_commands; h 7666 drivers/scsi/hpsa.c h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); h 7667 drivers/scsi/hpsa.c h->fw_support = readl(&(h->cfgtable->misc_fw_support)); h 7668 drivers/scsi/hpsa.c if (hpsa_supports_chained_sg_blocks(h)) { h 7670 drivers/scsi/hpsa.c h->max_cmd_sg_entries = 32; h 7671 drivers/scsi/hpsa.c h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; h 7672 drivers/scsi/hpsa.c h->maxsgentries--; /* save one for chain pointer */ h 7679 drivers/scsi/hpsa.c h->max_cmd_sg_entries = 31; h 7680 drivers/scsi/hpsa.c h->maxsgentries = 31; /* default to traditional values */ h 7681 drivers/scsi/hpsa.c h->chainsize = 0; h 7685 drivers/scsi/hpsa.c h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); h 7686 drivers/scsi/hpsa.c if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) h 7687 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); h 7688 drivers/scsi/hpsa.c if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) h 7689 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); h 7690 drivers/scsi/hpsa.c if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) h 7691 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); h 7694 drivers/scsi/hpsa.c static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) h 7696 drivers/scsi/hpsa.c if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { h 7697 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "not a valid CISS config table\n"); h 7703 drivers/scsi/hpsa.c static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) h 7707 drivers/scsi/hpsa.c driver_support = readl(&(h->cfgtable->driver_support)); h 7713 drivers/scsi/hpsa.c writel(driver_support, &(h->cfgtable->driver_support)); h 7719 drivers/scsi/hpsa.c static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) h 7723 drivers/scsi/hpsa.c if (h->board_id != 0x3225103C) h 7725 drivers/scsi/hpsa.c dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); h 7727 drivers/scsi/hpsa.c writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); h 7730 drivers/scsi/hpsa.c static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) h 7737 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 7738 drivers/scsi/hpsa.c doorbell_value = readl(h->vaddr + SA5_DOORBELL); h 7739 
drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 7750 drivers/scsi/hpsa.c static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) h 7761 drivers/scsi/hpsa.c if (h->remove_in_progress) h 7763 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 7764 drivers/scsi/hpsa.c doorbell_value = readl(h->vaddr + SA5_DOORBELL); h 7765 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 7777 drivers/scsi/hpsa.c static int hpsa_enter_simple_mode(struct ctlr_info *h) h 7781 drivers/scsi/hpsa.c trans_support = readl(&(h->cfgtable->TransportSupport)); h 7785 drivers/scsi/hpsa.c h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); h 7788 drivers/scsi/hpsa.c writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); h 7789 drivers/scsi/hpsa.c writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); h 7790 drivers/scsi/hpsa.c writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); h 7791 drivers/scsi/hpsa.c if (hpsa_wait_for_mode_change_ack(h)) h 7793 drivers/scsi/hpsa.c print_cfg_table(&h->pdev->dev, h->cfgtable); h 7794 drivers/scsi/hpsa.c if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) h 7796 drivers/scsi/hpsa.c h->transMethod = CFGTBL_Trans_Simple; h 7799 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "failed to enter simple mode\n"); h 7804 drivers/scsi/hpsa.c static void hpsa_free_pci_init(struct ctlr_info *h) h 7806 drivers/scsi/hpsa.c hpsa_free_cfgtables(h); /* pci_init 4 */ h 7807 drivers/scsi/hpsa.c iounmap(h->vaddr); /* pci_init 3 */ h 7808 drivers/scsi/hpsa.c h->vaddr = NULL; h 7809 drivers/scsi/hpsa.c hpsa_disable_interrupt_mode(h); /* pci_init 2 */ h 7814 drivers/scsi/hpsa.c pci_disable_device(h->pdev); /* pci_init 1 */ h 7815 drivers/scsi/hpsa.c pci_release_regions(h->pdev); /* pci_init 2 */ h 7819 drivers/scsi/hpsa.c static int hpsa_pci_init(struct ctlr_info *h) h 7824 drivers/scsi/hpsa.c prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board); h 7827 drivers/scsi/hpsa.c h->product_name = products[prod_index].product_name; h 7828 drivers/scsi/hpsa.c h->access = *(products[prod_index].access); h 7829 drivers/scsi/hpsa.c h->legacy_board = legacy_board; h 7830 drivers/scsi/hpsa.c pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | h 7833 drivers/scsi/hpsa.c err = pci_enable_device(h->pdev); h 7835 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "failed to enable PCI device\n"); h 7836 drivers/scsi/hpsa.c pci_disable_device(h->pdev); h 7840 drivers/scsi/hpsa.c err = pci_request_regions(h->pdev, HPSA); h 7842 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, h 7844 drivers/scsi/hpsa.c pci_disable_device(h->pdev); h 7848 drivers/scsi/hpsa.c pci_set_master(h->pdev); h 7850 drivers/scsi/hpsa.c err = hpsa_interrupt_mode(h); h 7855 drivers/scsi/hpsa.c hpsa_setup_reply_map(h); h 7857 drivers/scsi/hpsa.c err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); h 7860 drivers/scsi/hpsa.c h->vaddr = remap_pci_mem(h->paddr, 0x250); h 7861 drivers/scsi/hpsa.c if (!h->vaddr) { h 7862 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); h 7866 drivers/scsi/hpsa.c err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); h 7869 drivers/scsi/hpsa.c err = hpsa_find_cfgtables(h); h 7872 drivers/scsi/hpsa.c hpsa_find_board_params(h); h 7874 drivers/scsi/hpsa.c if (!hpsa_CISS_signature_present(h)) { h 7878 drivers/scsi/hpsa.c hpsa_set_driver_support_bits(h); h 7879 drivers/scsi/hpsa.c hpsa_p600_dma_prefetch_quirk(h); h 7880 drivers/scsi/hpsa.c err = hpsa_enter_simple_mode(h); h 7886 drivers/scsi/hpsa.c hpsa_free_cfgtables(h); h 
7888 drivers/scsi/hpsa.c iounmap(h->vaddr); h 7889 drivers/scsi/hpsa.c h->vaddr = NULL; h 7891 drivers/scsi/hpsa.c hpsa_disable_interrupt_mode(h); h 7897 drivers/scsi/hpsa.c pci_disable_device(h->pdev); h 7898 drivers/scsi/hpsa.c pci_release_regions(h->pdev); h 7902 drivers/scsi/hpsa.c static void hpsa_hba_inquiry(struct ctlr_info *h) h 7907 drivers/scsi/hpsa.c h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); h 7908 drivers/scsi/hpsa.c if (!h->hba_inquiry_data) h 7910 drivers/scsi/hpsa.c rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, h 7911 drivers/scsi/hpsa.c h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); h 7913 drivers/scsi/hpsa.c kfree(h->hba_inquiry_data); h 7914 drivers/scsi/hpsa.c h->hba_inquiry_data = NULL; h 7980 drivers/scsi/hpsa.c static void hpsa_free_cmd_pool(struct ctlr_info *h) h 7982 drivers/scsi/hpsa.c kfree(h->cmd_pool_bits); h 7983 drivers/scsi/hpsa.c h->cmd_pool_bits = NULL; h 7984 drivers/scsi/hpsa.c if (h->cmd_pool) { h 7985 drivers/scsi/hpsa.c dma_free_coherent(&h->pdev->dev, h 7986 drivers/scsi/hpsa.c h->nr_cmds * sizeof(struct CommandList), h 7987 drivers/scsi/hpsa.c h->cmd_pool, h 7988 drivers/scsi/hpsa.c h->cmd_pool_dhandle); h 7989 drivers/scsi/hpsa.c h->cmd_pool = NULL; h 7990 drivers/scsi/hpsa.c h->cmd_pool_dhandle = 0; h 7992 drivers/scsi/hpsa.c if (h->errinfo_pool) { h 7993 drivers/scsi/hpsa.c dma_free_coherent(&h->pdev->dev, h 7994 drivers/scsi/hpsa.c h->nr_cmds * sizeof(struct ErrorInfo), h 7995 drivers/scsi/hpsa.c h->errinfo_pool, h 7996 drivers/scsi/hpsa.c h->errinfo_pool_dhandle); h 7997 drivers/scsi/hpsa.c h->errinfo_pool = NULL; h 7998 drivers/scsi/hpsa.c h->errinfo_pool_dhandle = 0; h 8002 drivers/scsi/hpsa.c static int hpsa_alloc_cmd_pool(struct ctlr_info *h) h 8004 drivers/scsi/hpsa.c h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG), h 8007 drivers/scsi/hpsa.c h->cmd_pool = dma_alloc_coherent(&h->pdev->dev, h 8008 drivers/scsi/hpsa.c h->nr_cmds * sizeof(*h->cmd_pool), h 8009 drivers/scsi/hpsa.c &h->cmd_pool_dhandle, GFP_KERNEL); h 8010 drivers/scsi/hpsa.c h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev, h 8011 drivers/scsi/hpsa.c h->nr_cmds * sizeof(*h->errinfo_pool), h 8012 drivers/scsi/hpsa.c &h->errinfo_pool_dhandle, GFP_KERNEL); h 8013 drivers/scsi/hpsa.c if ((h->cmd_pool_bits == NULL) h 8014 drivers/scsi/hpsa.c || (h->cmd_pool == NULL) h 8015 drivers/scsi/hpsa.c || (h->errinfo_pool == NULL)) { h 8016 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "out of memory in %s", __func__); h 8019 drivers/scsi/hpsa.c hpsa_preinitialize_commands(h); h 8022 drivers/scsi/hpsa.c hpsa_free_cmd_pool(h); h 8027 drivers/scsi/hpsa.c static void hpsa_free_irqs(struct ctlr_info *h) h 8033 drivers/scsi/hpsa.c irq_vector = h->intr_mode; h 8035 drivers/scsi/hpsa.c if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) { h 8037 drivers/scsi/hpsa.c free_irq(pci_irq_vector(h->pdev, irq_vector), h 8038 drivers/scsi/hpsa.c &h->q[h->intr_mode]); h 8039 drivers/scsi/hpsa.c h->q[h->intr_mode] = 0; h 8043 drivers/scsi/hpsa.c for (i = 0; i < h->msix_vectors; i++) { h 8044 drivers/scsi/hpsa.c free_irq(pci_irq_vector(h->pdev, i), &h->q[i]); h 8045 drivers/scsi/hpsa.c h->q[i] = 0; h 8048 drivers/scsi/hpsa.c h->q[i] = 0; h 8052 drivers/scsi/hpsa.c static int hpsa_request_irqs(struct ctlr_info *h, h 8060 drivers/scsi/hpsa.c irq_vector = h->intr_mode; h 8067 drivers/scsi/hpsa.c h->q[i] = (u8) i; h 8069 drivers/scsi/hpsa.c if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) { h 8071 drivers/scsi/hpsa.c for (i = 0; i < h->msix_vectors; i++) { h 
8072 drivers/scsi/hpsa.c sprintf(h->intrname[i], "%s-msix%d", h->devname, i); h 8073 drivers/scsi/hpsa.c rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler, h 8074 drivers/scsi/hpsa.c 0, h->intrname[i], h 8075 drivers/scsi/hpsa.c &h->q[i]); h 8079 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, h 8081 drivers/scsi/hpsa.c pci_irq_vector(h->pdev, i), h->devname); h 8083 drivers/scsi/hpsa.c free_irq(pci_irq_vector(h->pdev, j), &h->q[j]); h 8084 drivers/scsi/hpsa.c h->q[j] = 0; h 8087 drivers/scsi/hpsa.c h->q[j] = 0; h 8093 drivers/scsi/hpsa.c if (h->msix_vectors > 0 || h->pdev->msi_enabled) { h 8094 drivers/scsi/hpsa.c sprintf(h->intrname[0], "%s-msi%s", h->devname, h 8095 drivers/scsi/hpsa.c h->msix_vectors ? "x" : ""); h 8096 drivers/scsi/hpsa.c rc = request_irq(pci_irq_vector(h->pdev, irq_vector), h 8098 drivers/scsi/hpsa.c h->intrname[0], h 8099 drivers/scsi/hpsa.c &h->q[h->intr_mode]); h 8101 drivers/scsi/hpsa.c sprintf(h->intrname[h->intr_mode], h 8102 drivers/scsi/hpsa.c "%s-intx", h->devname); h 8103 drivers/scsi/hpsa.c rc = request_irq(pci_irq_vector(h->pdev, irq_vector), h 8105 drivers/scsi/hpsa.c h->intrname[0], h 8106 drivers/scsi/hpsa.c &h->q[h->intr_mode]); h 8110 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", h 8111 drivers/scsi/hpsa.c pci_irq_vector(h->pdev, irq_vector), h->devname); h 8112 drivers/scsi/hpsa.c hpsa_free_irqs(h); h 8118 drivers/scsi/hpsa.c static int hpsa_kdump_soft_reset(struct ctlr_info *h) h 8121 drivers/scsi/hpsa.c hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER); h 8123 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); h 8124 drivers/scsi/hpsa.c rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); h 8126 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); h 8130 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); h 8131 drivers/scsi/hpsa.c rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); h 8133 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Board failed to become ready " h 8141 drivers/scsi/hpsa.c static void hpsa_free_reply_queues(struct ctlr_info *h) h 8145 drivers/scsi/hpsa.c for (i = 0; i < h->nreply_queues; i++) { h 8146 drivers/scsi/hpsa.c if (!h->reply_queue[i].head) h 8148 drivers/scsi/hpsa.c dma_free_coherent(&h->pdev->dev, h 8149 drivers/scsi/hpsa.c h->reply_queue_size, h 8150 drivers/scsi/hpsa.c h->reply_queue[i].head, h 8151 drivers/scsi/hpsa.c h->reply_queue[i].busaddr); h 8152 drivers/scsi/hpsa.c h->reply_queue[i].head = NULL; h 8153 drivers/scsi/hpsa.c h->reply_queue[i].busaddr = 0; h 8155 drivers/scsi/hpsa.c h->reply_queue_size = 0; h 8158 drivers/scsi/hpsa.c static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) h 8160 drivers/scsi/hpsa.c hpsa_free_performant_mode(h); /* init_one 7 */ h 8161 drivers/scsi/hpsa.c hpsa_free_sg_chain_blocks(h); /* init_one 6 */ h 8162 drivers/scsi/hpsa.c hpsa_free_cmd_pool(h); /* init_one 5 */ h 8163 drivers/scsi/hpsa.c hpsa_free_irqs(h); /* init_one 4 */ h 8164 drivers/scsi/hpsa.c scsi_host_put(h->scsi_host); /* init_one 3 */ h 8165 drivers/scsi/hpsa.c h->scsi_host = NULL; /* init_one 3 */ h 8166 drivers/scsi/hpsa.c hpsa_free_pci_init(h); /* init_one 2_5 */ h 8167 drivers/scsi/hpsa.c free_percpu(h->lockup_detected); /* init_one 2 */ h 8168 drivers/scsi/hpsa.c h->lockup_detected = NULL; /* init_one 2 */ h 8169 drivers/scsi/hpsa.c if (h->resubmit_wq) { h 8170 drivers/scsi/hpsa.c destroy_workqueue(h->resubmit_wq); /* init_one 1 */ h 8171 
drivers/scsi/hpsa.c h->resubmit_wq = NULL; h 8173 drivers/scsi/hpsa.c if (h->rescan_ctlr_wq) { h 8174 drivers/scsi/hpsa.c destroy_workqueue(h->rescan_ctlr_wq); h 8175 drivers/scsi/hpsa.c h->rescan_ctlr_wq = NULL; h 8177 drivers/scsi/hpsa.c if (h->monitor_ctlr_wq) { h 8178 drivers/scsi/hpsa.c destroy_workqueue(h->monitor_ctlr_wq); h 8179 drivers/scsi/hpsa.c h->monitor_ctlr_wq = NULL; h 8182 drivers/scsi/hpsa.c kfree(h); /* init_one 1 */ h 8186 drivers/scsi/hpsa.c static void fail_all_outstanding_cmds(struct ctlr_info *h) h 8192 drivers/scsi/hpsa.c flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ h 8193 drivers/scsi/hpsa.c for (i = 0; i < h->nr_cmds; i++) { h 8194 drivers/scsi/hpsa.c c = h->cmd_pool + i; h 8199 drivers/scsi/hpsa.c atomic_dec(&h->commands_outstanding); h 8202 drivers/scsi/hpsa.c cmd_free(h, c); h 8204 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, h 8208 drivers/scsi/hpsa.c static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) h 8214 drivers/scsi/hpsa.c lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); h 8220 drivers/scsi/hpsa.c static void controller_lockup_detected(struct ctlr_info *h) h 8225 drivers/scsi/hpsa.c h->access.set_intr_mask(h, HPSA_INTR_OFF); h 8226 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 8227 drivers/scsi/hpsa.c lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); h 8230 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, h 8232 drivers/scsi/hpsa.c h->heartbeat_sample_interval / HZ); h 8235 drivers/scsi/hpsa.c set_lockup_detected_for_all_cpus(h, lockup_detected); h 8236 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8237 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", h 8238 drivers/scsi/hpsa.c lockup_detected, h->heartbeat_sample_interval / HZ); h 8240 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n"); h 8241 drivers/scsi/hpsa.c writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL); h 8243 drivers/scsi/hpsa.c pci_disable_device(h->pdev); h 8244 drivers/scsi/hpsa.c fail_all_outstanding_cmds(h); h 8247 drivers/scsi/hpsa.c static int detect_controller_lockup(struct ctlr_info *h) h 8255 drivers/scsi/hpsa.c if (time_after64(h->last_intr_timestamp + h 8256 drivers/scsi/hpsa.c (h->heartbeat_sample_interval), now)) h 8264 drivers/scsi/hpsa.c if (time_after64(h->last_heartbeat_timestamp + h 8265 drivers/scsi/hpsa.c (h->heartbeat_sample_interval), now)) h 8269 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 8270 drivers/scsi/hpsa.c heartbeat = readl(&h->cfgtable->HeartBeat); h 8271 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8272 drivers/scsi/hpsa.c if (h->last_heartbeat == heartbeat) { h 8273 drivers/scsi/hpsa.c controller_lockup_detected(h); h 8278 drivers/scsi/hpsa.c h->last_heartbeat = heartbeat; h 8279 drivers/scsi/hpsa.c h->last_heartbeat_timestamp = now; h 8292 drivers/scsi/hpsa.c static void hpsa_set_ioaccel_status(struct ctlr_info *h) h 8300 drivers/scsi/hpsa.c if (!h) h 8310 drivers/scsi/hpsa.c for (i = 0; i < h->ndevices; i++) { h 8311 drivers/scsi/hpsa.c device = h->dev[i]; h 8315 drivers/scsi/hpsa.c if (!hpsa_vpd_page_supported(h, device->scsi3addr, h 8321 drivers/scsi/hpsa.c rc = hpsa_scsi_do_inquiry(h, device->scsi3addr, h 8352 drivers/scsi/hpsa.c static void hpsa_ack_ctlr_events(struct ctlr_info *h) h 8356 drivers/scsi/hpsa.c if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) h 8360 drivers/scsi/hpsa.c if ((h->transMethod & (CFGTBL_Trans_io_accel1 h 8362 drivers/scsi/hpsa.c 
(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || h 8363 drivers/scsi/hpsa.c h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { h 8365 drivers/scsi/hpsa.c if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) h 8367 drivers/scsi/hpsa.c if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) h 8370 drivers/scsi/hpsa.c scsi_block_requests(h->scsi_host); h 8371 drivers/scsi/hpsa.c hpsa_set_ioaccel_status(h); h 8372 drivers/scsi/hpsa.c hpsa_drain_accel_commands(h); h 8374 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, h 8376 drivers/scsi/hpsa.c h->events, event_type); h 8377 drivers/scsi/hpsa.c writel(h->events, &(h->cfgtable->clear_event_notify)); h 8379 drivers/scsi/hpsa.c writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); h 8381 drivers/scsi/hpsa.c hpsa_wait_for_clear_event_notify_ack(h); h 8382 drivers/scsi/hpsa.c scsi_unblock_requests(h->scsi_host); h 8385 drivers/scsi/hpsa.c writel(h->events, &(h->cfgtable->clear_event_notify)); h 8386 drivers/scsi/hpsa.c writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); h 8387 drivers/scsi/hpsa.c hpsa_wait_for_clear_event_notify_ack(h); h 8397 drivers/scsi/hpsa.c static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) h 8399 drivers/scsi/hpsa.c if (h->drv_req_rescan) { h 8400 drivers/scsi/hpsa.c h->drv_req_rescan = 0; h 8404 drivers/scsi/hpsa.c if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) h 8407 drivers/scsi/hpsa.c h->events = readl(&(h->cfgtable->event_notify)); h 8408 drivers/scsi/hpsa.c return h->events & RESCAN_REQUIRED_EVENT_BITS; h 8414 drivers/scsi/hpsa.c static int hpsa_offline_devices_ready(struct ctlr_info *h) h 8420 drivers/scsi/hpsa.c spin_lock_irqsave(&h->offline_device_lock, flags); h 8421 drivers/scsi/hpsa.c list_for_each_safe(this, tmp, &h->offline_device_list) { h 8424 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->offline_device_lock, flags); h 8425 drivers/scsi/hpsa.c if (!hpsa_volume_offline(h, d->scsi3addr)) { h 8426 drivers/scsi/hpsa.c spin_lock_irqsave(&h->offline_device_lock, flags); h 8428 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->offline_device_lock, flags); h 8431 drivers/scsi/hpsa.c spin_lock_irqsave(&h->offline_device_lock, flags); h 8433 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->offline_device_lock, flags); h 8437 drivers/scsi/hpsa.c static int hpsa_luns_changed(struct ctlr_info *h) h 8446 drivers/scsi/hpsa.c if (!h->lastlogicals) h 8453 drivers/scsi/hpsa.c if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { h 8454 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, h 8458 drivers/scsi/hpsa.c if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) { h 8459 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, h 8461 drivers/scsi/hpsa.c memcpy(h->lastlogicals, logdev, sizeof(*logdev)); h 8470 drivers/scsi/hpsa.c static void hpsa_perform_rescan(struct ctlr_info *h) h 8478 drivers/scsi/hpsa.c spin_lock_irqsave(&h->reset_lock, flags); h 8479 drivers/scsi/hpsa.c if (h->reset_in_progress) { h 8480 drivers/scsi/hpsa.c h->drv_req_rescan = 1; h 8481 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->reset_lock, flags); h 8484 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->reset_lock, flags); h 8486 drivers/scsi/hpsa.c sh = scsi_host_get(h->scsi_host); h 8490 drivers/scsi/hpsa.c h->drv_req_rescan = 0; h 8499 drivers/scsi/hpsa.c struct ctlr_info *h = container_of(to_delayed_work(work), h 8503 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 8504 drivers/scsi/hpsa.c if (h->remove_in_progress) { h 8505 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8508 
drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8510 drivers/scsi/hpsa.c if (hpsa_ctlr_needs_rescan(h)) { h 8511 drivers/scsi/hpsa.c hpsa_ack_ctlr_events(h); h 8512 drivers/scsi/hpsa.c hpsa_perform_rescan(h); h 8515 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 8516 drivers/scsi/hpsa.c if (!h->remove_in_progress) h 8517 drivers/scsi/hpsa.c queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work, h 8519 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8525 drivers/scsi/hpsa.c struct ctlr_info *h = container_of(to_delayed_work(work), h 8528 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 8529 drivers/scsi/hpsa.c if (h->remove_in_progress) { h 8530 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8533 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8535 drivers/scsi/hpsa.c if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) { h 8536 drivers/scsi/hpsa.c hpsa_perform_rescan(h); h 8537 drivers/scsi/hpsa.c } else if (h->discovery_polling) { h 8538 drivers/scsi/hpsa.c if (hpsa_luns_changed(h)) { h 8539 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, h 8541 drivers/scsi/hpsa.c hpsa_perform_rescan(h); h 8544 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 8545 drivers/scsi/hpsa.c if (!h->remove_in_progress) h 8546 drivers/scsi/hpsa.c queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, h 8547 drivers/scsi/hpsa.c h->heartbeat_sample_interval); h 8548 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8554 drivers/scsi/hpsa.c struct ctlr_info *h = container_of(to_delayed_work(work), h 8557 drivers/scsi/hpsa.c detect_controller_lockup(h); h 8558 drivers/scsi/hpsa.c if (lockup_detected(h)) h 8561 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 8562 drivers/scsi/hpsa.c if (!h->remove_in_progress) h 8563 drivers/scsi/hpsa.c queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work, h 8564 drivers/scsi/hpsa.c h->heartbeat_sample_interval); h 8565 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8568 drivers/scsi/hpsa.c static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, h 8573 drivers/scsi/hpsa.c wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); h 8575 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); h 8580 drivers/scsi/hpsa.c static void hpda_free_ctlr_info(struct ctlr_info *h) h 8582 drivers/scsi/hpsa.c kfree(h->reply_map); h 8583 drivers/scsi/hpsa.c kfree(h); h 8588 drivers/scsi/hpsa.c struct ctlr_info *h; h 8590 drivers/scsi/hpsa.c h = kzalloc(sizeof(*h), GFP_KERNEL); h 8591 drivers/scsi/hpsa.c if (!h) h 8594 drivers/scsi/hpsa.c h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL); h 8595 drivers/scsi/hpsa.c if (!h->reply_map) { h 8596 drivers/scsi/hpsa.c kfree(h); h 8599 drivers/scsi/hpsa.c return h; h 8605 drivers/scsi/hpsa.c struct ctlr_info *h; h 8639 drivers/scsi/hpsa.c h = hpda_alloc_ctlr_info(); h 8640 drivers/scsi/hpsa.c if (!h) { h 8645 drivers/scsi/hpsa.c h->pdev = pdev; h 8647 drivers/scsi/hpsa.c h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; h 8648 drivers/scsi/hpsa.c INIT_LIST_HEAD(&h->offline_device_list); h 8649 drivers/scsi/hpsa.c spin_lock_init(&h->lock); h 8650 drivers/scsi/hpsa.c spin_lock_init(&h->offline_device_lock); h 8651 drivers/scsi/hpsa.c spin_lock_init(&h->scan_lock); h 8652 drivers/scsi/hpsa.c spin_lock_init(&h->reset_lock); h 8653 drivers/scsi/hpsa.c atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); h 8656 drivers/scsi/hpsa.c h->lockup_detected = alloc_percpu(u32); h 8657 drivers/scsi/hpsa.c if (!h->lockup_detected) { h 8658 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); h 8662 drivers/scsi/hpsa.c set_lockup_detected_for_all_cpus(h, 0); h 8664 drivers/scsi/hpsa.c rc = hpsa_pci_init(h); h 8670 drivers/scsi/hpsa.c rc = hpsa_scsi_host_alloc(h); h 8674 drivers/scsi/hpsa.c sprintf(h->devname, HPSA "%d", h->scsi_host->host_no); h 8675 drivers/scsi/hpsa.c h->ctlr = number_of_controllers; h 8693 drivers/scsi/hpsa.c h->access.set_intr_mask(h, HPSA_INTR_OFF); h 8695 drivers/scsi/hpsa.c rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); h 8698 drivers/scsi/hpsa.c rc = hpsa_alloc_cmd_pool(h); h 8701 drivers/scsi/hpsa.c rc = hpsa_alloc_sg_chain_blocks(h); h 8704 drivers/scsi/hpsa.c init_waitqueue_head(&h->scan_wait_queue); h 8705 drivers/scsi/hpsa.c init_waitqueue_head(&h->event_sync_wait_queue); h 8706 drivers/scsi/hpsa.c mutex_init(&h->reset_mutex); h 8707 drivers/scsi/hpsa.c h->scan_finished = 1; /* no scan currently in progress */ h 8708 drivers/scsi/hpsa.c h->scan_waiting = 0; h 8710 drivers/scsi/hpsa.c pci_set_drvdata(pdev, h); h 8711 drivers/scsi/hpsa.c h->ndevices = 0; h 8713 drivers/scsi/hpsa.c spin_lock_init(&h->devlock); h 8714 drivers/scsi/hpsa.c rc = hpsa_put_ctlr_into_performant_mode(h); h 8719 drivers/scsi/hpsa.c h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); h 8720 drivers/scsi/hpsa.c if (!h->rescan_ctlr_wq) { h 8725 drivers/scsi/hpsa.c h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); h 8726 drivers/scsi/hpsa.c if (!h->resubmit_wq) { h 8731 drivers/scsi/hpsa.c h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor"); h 8732 drivers/scsi/hpsa.c if (!h->monitor_ctlr_wq) { h 8751 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 8752 drivers/scsi/hpsa.c h->access.set_intr_mask(h, HPSA_INTR_OFF); h 8753 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 8754 drivers/scsi/hpsa.c hpsa_free_irqs(h); h 8755 drivers/scsi/hpsa.c rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, h 8758 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, h 8764 drivers/scsi/hpsa.c hpsa_free_performant_mode(h); /* clean7 */ h 8765 drivers/scsi/hpsa.c hpsa_free_sg_chain_blocks(h); /* clean6 */ h 8766 drivers/scsi/hpsa.c hpsa_free_cmd_pool(h); /* clean5 */ h 8774 drivers/scsi/hpsa.c rc = hpsa_kdump_soft_reset(h); h 8779 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "Board READY.\n"); h 8780 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, h 8782 drivers/scsi/hpsa.c h->access.set_intr_mask(h, HPSA_INTR_ON); h 8784 drivers/scsi/hpsa.c h->access.set_intr_mask(h, HPSA_INTR_OFF); h 8786 drivers/scsi/hpsa.c rc = controller_reset_failed(h->cfgtable); h 8788 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, h 8795 drivers/scsi/hpsa.c hpsa_undo_allocations_after_kdump_soft_reset(h); h 8805 drivers/scsi/hpsa.c h->acciopath_status = 1; h 8807 drivers/scsi/hpsa.c h->discovery_polling = 0; h 8811 drivers/scsi/hpsa.c h->access.set_intr_mask(h, HPSA_INTR_ON); h 8813 drivers/scsi/hpsa.c hpsa_hba_inquiry(h); h 8815 
drivers/scsi/hpsa.c h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL); h 8816 drivers/scsi/hpsa.c if (!h->lastlogicals) h 8817 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, h 8821 drivers/scsi/hpsa.c rc = hpsa_scsi_add_host(h); h 8826 drivers/scsi/hpsa.c h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; h 8827 drivers/scsi/hpsa.c INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); h 8828 drivers/scsi/hpsa.c schedule_delayed_work(&h->monitor_ctlr_work, h 8829 drivers/scsi/hpsa.c h->heartbeat_sample_interval); h 8830 drivers/scsi/hpsa.c INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); h 8831 drivers/scsi/hpsa.c queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, h 8832 drivers/scsi/hpsa.c h->heartbeat_sample_interval); h 8833 drivers/scsi/hpsa.c INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker); h 8834 drivers/scsi/hpsa.c schedule_delayed_work(&h->event_monitor_work, h 8839 drivers/scsi/hpsa.c hpsa_free_performant_mode(h); h 8840 drivers/scsi/hpsa.c h->access.set_intr_mask(h, HPSA_INTR_OFF); h 8842 drivers/scsi/hpsa.c hpsa_free_sg_chain_blocks(h); h 8844 drivers/scsi/hpsa.c hpsa_free_cmd_pool(h); h 8846 drivers/scsi/hpsa.c hpsa_free_irqs(h); h 8848 drivers/scsi/hpsa.c scsi_host_put(h->scsi_host); h 8849 drivers/scsi/hpsa.c h->scsi_host = NULL; h 8851 drivers/scsi/hpsa.c hpsa_free_pci_init(h); h 8853 drivers/scsi/hpsa.c if (h->lockup_detected) { h 8854 drivers/scsi/hpsa.c free_percpu(h->lockup_detected); h 8855 drivers/scsi/hpsa.c h->lockup_detected = NULL; h 8858 drivers/scsi/hpsa.c if (h->resubmit_wq) { h 8859 drivers/scsi/hpsa.c destroy_workqueue(h->resubmit_wq); h 8860 drivers/scsi/hpsa.c h->resubmit_wq = NULL; h 8862 drivers/scsi/hpsa.c if (h->rescan_ctlr_wq) { h 8863 drivers/scsi/hpsa.c destroy_workqueue(h->rescan_ctlr_wq); h 8864 drivers/scsi/hpsa.c h->rescan_ctlr_wq = NULL; h 8866 drivers/scsi/hpsa.c if (h->monitor_ctlr_wq) { h 8867 drivers/scsi/hpsa.c destroy_workqueue(h->monitor_ctlr_wq); h 8868 drivers/scsi/hpsa.c h->monitor_ctlr_wq = NULL; h 8870 drivers/scsi/hpsa.c kfree(h); h 8874 drivers/scsi/hpsa.c static void hpsa_flush_cache(struct ctlr_info *h) h 8880 drivers/scsi/hpsa.c if (unlikely(lockup_detected(h))) h 8886 drivers/scsi/hpsa.c c = cmd_alloc(h); h 8888 drivers/scsi/hpsa.c if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, h 8892 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, h 8898 drivers/scsi/hpsa.c dev_warn(&h->pdev->dev, h 8900 drivers/scsi/hpsa.c cmd_free(h, c); h 8907 drivers/scsi/hpsa.c static void hpsa_disable_rld_caching(struct ctlr_info *h) h 8914 drivers/scsi/hpsa.c if (unlikely(h->lockup_detected)) h 8921 drivers/scsi/hpsa.c c = cmd_alloc(h); h 8924 drivers/scsi/hpsa.c if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, h 8928 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, h 8936 drivers/scsi/hpsa.c if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0, h 8940 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, h 8946 drivers/scsi/hpsa.c if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, h 8950 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, h 8959 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, h 8962 drivers/scsi/hpsa.c cmd_free(h, c); h 8968 drivers/scsi/hpsa.c struct ctlr_info *h; h 8970 drivers/scsi/hpsa.c h = pci_get_drvdata(pdev); h 8975 drivers/scsi/hpsa.c hpsa_flush_cache(h); h 8976 drivers/scsi/hpsa.c 
h->access.set_intr_mask(h, HPSA_INTR_OFF); h 8977 drivers/scsi/hpsa.c hpsa_free_irqs(h); /* init_one 4 */ h 8978 drivers/scsi/hpsa.c hpsa_disable_interrupt_mode(h); /* pci_init 2 */ h 8987 drivers/scsi/hpsa.c static void hpsa_free_device_info(struct ctlr_info *h) h 8991 drivers/scsi/hpsa.c for (i = 0; i < h->ndevices; i++) { h 8992 drivers/scsi/hpsa.c kfree(h->dev[i]); h 8993 drivers/scsi/hpsa.c h->dev[i] = NULL; h 8999 drivers/scsi/hpsa.c struct ctlr_info *h; h 9006 drivers/scsi/hpsa.c h = pci_get_drvdata(pdev); h 9009 drivers/scsi/hpsa.c spin_lock_irqsave(&h->lock, flags); h 9010 drivers/scsi/hpsa.c h->remove_in_progress = 1; h 9011 drivers/scsi/hpsa.c spin_unlock_irqrestore(&h->lock, flags); h 9012 drivers/scsi/hpsa.c cancel_delayed_work_sync(&h->monitor_ctlr_work); h 9013 drivers/scsi/hpsa.c cancel_delayed_work_sync(&h->rescan_ctlr_work); h 9014 drivers/scsi/hpsa.c cancel_delayed_work_sync(&h->event_monitor_work); h 9015 drivers/scsi/hpsa.c destroy_workqueue(h->rescan_ctlr_wq); h 9016 drivers/scsi/hpsa.c destroy_workqueue(h->resubmit_wq); h 9017 drivers/scsi/hpsa.c destroy_workqueue(h->monitor_ctlr_wq); h 9019 drivers/scsi/hpsa.c hpsa_delete_sas_host(h); h 9027 drivers/scsi/hpsa.c if (h->scsi_host) h 9028 drivers/scsi/hpsa.c scsi_remove_host(h->scsi_host); /* init_one 8 */ h 9033 drivers/scsi/hpsa.c hpsa_free_device_info(h); /* scan */ h 9035 drivers/scsi/hpsa.c kfree(h->hba_inquiry_data); /* init_one 10 */ h 9036 drivers/scsi/hpsa.c h->hba_inquiry_data = NULL; /* init_one 10 */ h 9037 drivers/scsi/hpsa.c hpsa_free_ioaccel2_sg_chain_blocks(h); h 9038 drivers/scsi/hpsa.c hpsa_free_performant_mode(h); /* init_one 7 */ h 9039 drivers/scsi/hpsa.c hpsa_free_sg_chain_blocks(h); /* init_one 6 */ h 9040 drivers/scsi/hpsa.c hpsa_free_cmd_pool(h); /* init_one 5 */ h 9041 drivers/scsi/hpsa.c kfree(h->lastlogicals); h 9045 drivers/scsi/hpsa.c scsi_host_put(h->scsi_host); /* init_one 3 */ h 9046 drivers/scsi/hpsa.c h->scsi_host = NULL; /* init_one 3 */ h 9049 drivers/scsi/hpsa.c hpsa_free_pci_init(h); /* init_one 2.5 */ h 9051 drivers/scsi/hpsa.c free_percpu(h->lockup_detected); /* init_one 2 */ h 9052 drivers/scsi/hpsa.c h->lockup_detected = NULL; /* init_one 2 */ h 9055 drivers/scsi/hpsa.c hpda_free_ctlr_info(h); /* init_one 1 */ h 9117 drivers/scsi/hpsa.c static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) h 9171 drivers/scsi/hpsa.c for (i = 0; i < h->nreply_queues; i++) h 9172 drivers/scsi/hpsa.c memset(h->reply_queue[i].head, 0, h->reply_queue_size); h 9176 drivers/scsi/hpsa.c SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); h 9178 drivers/scsi/hpsa.c writel(bft[i], &h->transtable->BlockFetch[i]); h 9181 drivers/scsi/hpsa.c writel(h->max_commands, &h->transtable->RepQSize); h 9182 drivers/scsi/hpsa.c writel(h->nreply_queues, &h->transtable->RepQCount); h 9183 drivers/scsi/hpsa.c writel(0, &h->transtable->RepQCtrAddrLow32); h 9184 drivers/scsi/hpsa.c writel(0, &h->transtable->RepQCtrAddrHigh32); h 9186 drivers/scsi/hpsa.c for (i = 0; i < h->nreply_queues; i++) { h 9187 drivers/scsi/hpsa.c writel(0, &h->transtable->RepQAddr[i].upper); h 9188 drivers/scsi/hpsa.c writel(h->reply_queue[i].busaddr, h 9189 drivers/scsi/hpsa.c &h->transtable->RepQAddr[i].lower); h 9192 drivers/scsi/hpsa.c writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); h 9193 drivers/scsi/hpsa.c writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); h 9199 drivers/scsi/hpsa.c writel(10, &h->cfgtable->HostWrite.CoalIntDelay); h 9200 drivers/scsi/hpsa.c writel(4, 
&h->cfgtable->HostWrite.CoalIntCount); h 9204 drivers/scsi/hpsa.c writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); h 9205 drivers/scsi/hpsa.c if (hpsa_wait_for_mode_change_ack(h)) { h 9206 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, h 9210 drivers/scsi/hpsa.c register_value = readl(&(h->cfgtable->TransportActive)); h 9212 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, h 9217 drivers/scsi/hpsa.c h->access = access; h 9218 drivers/scsi/hpsa.c h->transMethod = transMethod; h 9226 drivers/scsi/hpsa.c for (i = 0; i < h->nreply_queues; i++) { h 9227 drivers/scsi/hpsa.c writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); h 9228 drivers/scsi/hpsa.c h->reply_queue[i].current_entry = h 9229 drivers/scsi/hpsa.c readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); h 9231 drivers/scsi/hpsa.c bft[7] = h->ioaccel_maxsg + 8; h 9232 drivers/scsi/hpsa.c calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, h 9233 drivers/scsi/hpsa.c h->ioaccel1_blockFetchTable); h 9236 drivers/scsi/hpsa.c for (i = 0; i < h->nreply_queues; i++) h 9237 drivers/scsi/hpsa.c memset(h->reply_queue[i].head, h 9239 drivers/scsi/hpsa.c h->reply_queue_size); h 9244 drivers/scsi/hpsa.c for (i = 0; i < h->nr_cmds; i++) { h 9245 drivers/scsi/hpsa.c struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; h 9248 drivers/scsi/hpsa.c cp->err_info = (u32) (h->errinfo_pool_dhandle + h 9259 drivers/scsi/hpsa.c cpu_to_le64(h->ioaccel_cmd_pool_dhandle + h 9267 drivers/scsi/hpsa.c rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, h 9270 drivers/scsi/hpsa.c bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; h 9271 drivers/scsi/hpsa.c calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, h 9272 drivers/scsi/hpsa.c 4, h->ioaccel2_blockFetchTable); h 9273 drivers/scsi/hpsa.c bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); h 9276 drivers/scsi/hpsa.c h->ioaccel2_bft2_regs = h 9277 drivers/scsi/hpsa.c remap_pci_mem(pci_resource_start(h->pdev, h 9281 drivers/scsi/hpsa.c sizeof(*h->ioaccel2_bft2_regs)); h 9283 drivers/scsi/hpsa.c writel(bft2[i], &h->ioaccel2_bft2_regs[i]); h 9285 drivers/scsi/hpsa.c writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); h 9286 drivers/scsi/hpsa.c if (hpsa_wait_for_mode_change_ack(h)) { h 9287 drivers/scsi/hpsa.c dev_err(&h->pdev->dev, h 9295 drivers/scsi/hpsa.c static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) h 9297 drivers/scsi/hpsa.c if (h->ioaccel_cmd_pool) { h 9298 drivers/scsi/hpsa.c pci_free_consistent(h->pdev, h 9299 drivers/scsi/hpsa.c h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), h 9300 drivers/scsi/hpsa.c h->ioaccel_cmd_pool, h 9301 drivers/scsi/hpsa.c h->ioaccel_cmd_pool_dhandle); h 9302 drivers/scsi/hpsa.c h->ioaccel_cmd_pool = NULL; h 9303 drivers/scsi/hpsa.c h->ioaccel_cmd_pool_dhandle = 0; h 9305 drivers/scsi/hpsa.c kfree(h->ioaccel1_blockFetchTable); h 9306 drivers/scsi/hpsa.c h->ioaccel1_blockFetchTable = NULL; h 9310 drivers/scsi/hpsa.c static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) h 9312 drivers/scsi/hpsa.c h->ioaccel_maxsg = h 9313 drivers/scsi/hpsa.c readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); h 9314 drivers/scsi/hpsa.c if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) h 9315 drivers/scsi/hpsa.c h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; h 9323 drivers/scsi/hpsa.c h->ioaccel_cmd_pool = h 9324 drivers/scsi/hpsa.c dma_alloc_coherent(&h->pdev->dev, h 9325 drivers/scsi/hpsa.c h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), h 9326 drivers/scsi/hpsa.c &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL); h 9328 drivers/scsi/hpsa.c h->ioaccel1_blockFetchTable = 
h 9329 drivers/scsi/hpsa.c kmalloc(((h->ioaccel_maxsg + 1) * h 9332 drivers/scsi/hpsa.c if ((h->ioaccel_cmd_pool == NULL) || h 9333 drivers/scsi/hpsa.c (h->ioaccel1_blockFetchTable == NULL)) h 9336 drivers/scsi/hpsa.c memset(h->ioaccel_cmd_pool, 0, h 9337 drivers/scsi/hpsa.c h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); h 9341 drivers/scsi/hpsa.c hpsa_free_ioaccel1_cmd_and_bft(h); h 9346 drivers/scsi/hpsa.c static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) h 9348 drivers/scsi/hpsa.c hpsa_free_ioaccel2_sg_chain_blocks(h); h 9350 drivers/scsi/hpsa.c if (h->ioaccel2_cmd_pool) { h 9351 drivers/scsi/hpsa.c pci_free_consistent(h->pdev, h 9352 drivers/scsi/hpsa.c h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), h 9353 drivers/scsi/hpsa.c h->ioaccel2_cmd_pool, h 9354 drivers/scsi/hpsa.c h->ioaccel2_cmd_pool_dhandle); h 9355 drivers/scsi/hpsa.c h->ioaccel2_cmd_pool = NULL; h 9356 drivers/scsi/hpsa.c h->ioaccel2_cmd_pool_dhandle = 0; h 9358 drivers/scsi/hpsa.c kfree(h->ioaccel2_blockFetchTable); h 9359 drivers/scsi/hpsa.c h->ioaccel2_blockFetchTable = NULL; h 9363 drivers/scsi/hpsa.c static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) h 9369 drivers/scsi/hpsa.c h->ioaccel_maxsg = h 9370 drivers/scsi/hpsa.c readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); h 9371 drivers/scsi/hpsa.c if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) h 9372 drivers/scsi/hpsa.c h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; h 9376 drivers/scsi/hpsa.c h->ioaccel2_cmd_pool = h 9377 drivers/scsi/hpsa.c dma_alloc_coherent(&h->pdev->dev, h 9378 drivers/scsi/hpsa.c h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), h 9379 drivers/scsi/hpsa.c &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL); h 9381 drivers/scsi/hpsa.c h->ioaccel2_blockFetchTable = h 9382 drivers/scsi/hpsa.c kmalloc(((h->ioaccel_maxsg + 1) * h 9385 drivers/scsi/hpsa.c if ((h->ioaccel2_cmd_pool == NULL) || h 9386 drivers/scsi/hpsa.c (h->ioaccel2_blockFetchTable == NULL)) { h 9391 drivers/scsi/hpsa.c rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h); h 9395 drivers/scsi/hpsa.c memset(h->ioaccel2_cmd_pool, 0, h 9396 drivers/scsi/hpsa.c h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); h 9400 drivers/scsi/hpsa.c hpsa_free_ioaccel2_cmd_and_bft(h); h 9405 drivers/scsi/hpsa.c static void hpsa_free_performant_mode(struct ctlr_info *h) h 9407 drivers/scsi/hpsa.c kfree(h->blockFetchTable); h 9408 drivers/scsi/hpsa.c h->blockFetchTable = NULL; h 9409 drivers/scsi/hpsa.c hpsa_free_reply_queues(h); h 9410 drivers/scsi/hpsa.c hpsa_free_ioaccel1_cmd_and_bft(h); h 9411 drivers/scsi/hpsa.c hpsa_free_ioaccel2_cmd_and_bft(h); h 9417 drivers/scsi/hpsa.c static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) h 9427 drivers/scsi/hpsa.c trans_support = readl(&(h->cfgtable->TransportSupport)); h 9435 drivers/scsi/hpsa.c rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); h 9441 drivers/scsi/hpsa.c rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); h 9446 drivers/scsi/hpsa.c h->nreply_queues = h->msix_vectors > 0 ? 
h->msix_vectors : 1; h 9447 drivers/scsi/hpsa.c hpsa_get_max_perf_mode_cmds(h); h 9449 drivers/scsi/hpsa.c h->reply_queue_size = h->max_commands * sizeof(u64); h 9451 drivers/scsi/hpsa.c for (i = 0; i < h->nreply_queues; i++) { h 9452 drivers/scsi/hpsa.c h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev, h 9453 drivers/scsi/hpsa.c h->reply_queue_size, h 9454 drivers/scsi/hpsa.c &h->reply_queue[i].busaddr, h 9456 drivers/scsi/hpsa.c if (!h->reply_queue[i].head) { h 9460 drivers/scsi/hpsa.c h->reply_queue[i].size = h->max_commands; h 9461 drivers/scsi/hpsa.c h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ h 9462 drivers/scsi/hpsa.c h->reply_queue[i].current_entry = 0; h 9466 drivers/scsi/hpsa.c h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * h 9468 drivers/scsi/hpsa.c if (!h->blockFetchTable) { h 9473 drivers/scsi/hpsa.c rc = hpsa_enter_performant_mode(h, trans_support); h 9479 drivers/scsi/hpsa.c kfree(h->blockFetchTable); h 9480 drivers/scsi/hpsa.c h->blockFetchTable = NULL; h 9482 drivers/scsi/hpsa.c hpsa_free_reply_queues(h); h 9483 drivers/scsi/hpsa.c hpsa_free_ioaccel1_cmd_and_bft(h); h 9484 drivers/scsi/hpsa.c hpsa_free_ioaccel2_cmd_and_bft(h); h 9493 drivers/scsi/hpsa.c static void hpsa_drain_accel_commands(struct ctlr_info *h) h 9501 drivers/scsi/hpsa.c for (i = 0; i < h->nr_cmds; i++) { h 9502 drivers/scsi/hpsa.c c = h->cmd_pool + i; h 9506 drivers/scsi/hpsa.c cmd_free(h, c); h 9678 drivers/scsi/hpsa.c *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, h 9684 drivers/scsi/hpsa.c for (i = 0; i < h->ndevices; i++) { h 9685 drivers/scsi/hpsa.c device = h->dev[i]; h 9695 drivers/scsi/hpsa.c static int hpsa_add_sas_host(struct ctlr_info *h) h 9703 drivers/scsi/hpsa.c parent_dev = &h->scsi_host->shost_dev; h 9709 drivers/scsi/hpsa.c hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address); h 9725 drivers/scsi/hpsa.c h->sas_host = hpsa_sas_node; h 9739 drivers/scsi/hpsa.c static void hpsa_delete_sas_host(struct ctlr_info *h) h 9741 drivers/scsi/hpsa.c hpsa_free_sas_node(h->sas_host); h 9795 drivers/scsi/hpsa.c struct ctlr_info *h; h 9801 drivers/scsi/hpsa.c h = shost_to_hba(shost); h 9803 drivers/scsi/hpsa.c if (!h) h 9806 drivers/scsi/hpsa.c sd = hpsa_find_device_by_sas_rphy(h, rphy); h 30 drivers/scsi/hpsa.h void (*submit_command)(struct ctlr_info *h, h 32 drivers/scsi/hpsa.h void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); h 33 drivers/scsi/hpsa.h bool (*intr_pending)(struct ctlr_info *h); h 34 drivers/scsi/hpsa.h unsigned long (*command_completed)(struct ctlr_info *h, u8 q); h 419 drivers/scsi/hpsa.h static void SA5_submit_command(struct ctlr_info *h, h 422 drivers/scsi/hpsa.h writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); h 423 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); h 426 drivers/scsi/hpsa.h static void SA5_submit_command_no_read(struct ctlr_info *h, h 429 drivers/scsi/hpsa.h writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); h 432 drivers/scsi/hpsa.h static void SA5_submit_command_ioaccel2(struct ctlr_info *h, h 435 drivers/scsi/hpsa.h writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); h 443 drivers/scsi/hpsa.h static void SA5_intr_mask(struct ctlr_info *h, unsigned long val) h 446 drivers/scsi/hpsa.h h->interrupts_enabled = 1; h 447 drivers/scsi/hpsa.h writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 448 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 450 drivers/scsi/hpsa.h h->interrupts_enabled = 0; h 452 drivers/scsi/hpsa.h h->vaddr + 
SA5_REPLY_INTR_MASK_OFFSET); h 453 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 460 drivers/scsi/hpsa.h static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val) h 463 drivers/scsi/hpsa.h h->interrupts_enabled = 1; h 464 drivers/scsi/hpsa.h writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 465 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 467 drivers/scsi/hpsa.h h->interrupts_enabled = 0; h 469 drivers/scsi/hpsa.h h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 470 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 474 drivers/scsi/hpsa.h static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) h 477 drivers/scsi/hpsa.h h->interrupts_enabled = 1; h 478 drivers/scsi/hpsa.h writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 479 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 481 drivers/scsi/hpsa.h h->interrupts_enabled = 0; h 483 drivers/scsi/hpsa.h h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 484 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); h 488 drivers/scsi/hpsa.h static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) h 490 drivers/scsi/hpsa.h struct reply_queue_buffer *rq = &h->reply_queue[q]; h 494 drivers/scsi/hpsa.h if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) { h 498 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_OUTDB_STATUS); h 499 drivers/scsi/hpsa.h writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); h 503 drivers/scsi/hpsa.h (void) readl(h->vaddr + SA5_OUTDB_STATUS); h 509 drivers/scsi/hpsa.h atomic_dec(&h->commands_outstanding); h 514 drivers/scsi/hpsa.h if (rq->current_entry == h->max_commands) { h 525 drivers/scsi/hpsa.h static unsigned long SA5_completed(struct ctlr_info *h, h 529 drivers/scsi/hpsa.h = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); h 532 drivers/scsi/hpsa.h atomic_dec(&h->commands_outstanding); h 536 drivers/scsi/hpsa.h dev_dbg(&h->pdev->dev, "Read %lx back from board\n", h 539 drivers/scsi/hpsa.h dev_dbg(&h->pdev->dev, "FIFO Empty read\n"); h 547 drivers/scsi/hpsa.h static bool SA5_intr_pending(struct ctlr_info *h) h 550 drivers/scsi/hpsa.h readl(h->vaddr + SA5_INTR_STATUS); h 554 drivers/scsi/hpsa.h static bool SA5_performant_intr_pending(struct ctlr_info *h) h 556 drivers/scsi/hpsa.h unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); h 562 drivers/scsi/hpsa.h register_value = readl(h->vaddr + SA5_OUTDB_STATUS); h 568 drivers/scsi/hpsa.h static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h) h 570 drivers/scsi/hpsa.h unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); h 579 drivers/scsi/hpsa.h static bool SA5B_intr_pending(struct ctlr_info *h) h 581 drivers/scsi/hpsa.h return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING; h 589 drivers/scsi/hpsa.h static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) h 592 drivers/scsi/hpsa.h struct reply_queue_buffer *rq = &h->reply_queue[q]; h 594 drivers/scsi/hpsa.h BUG_ON(q >= h->nreply_queues); h 608 drivers/scsi/hpsa.h writel((q << 24) | rq->current_entry, h->vaddr + h 610 drivers/scsi/hpsa.h atomic_dec(&h->commands_outstanding); h 432 drivers/scsi/hpsa_cmd.h struct ctlr_info *h; h 780 drivers/scsi/imm.c unsigned char l = 0, h = 0; h 896 drivers/scsi/imm.c imm_in(dev, &h, 1); h 666 drivers/scsi/isci/init.c struct isci_host *h = isci_host_alloc(pdev, i); h 668 drivers/scsi/isci/init.c if (!h) { h 672 drivers/scsi/isci/init.c pci_info->hosts[i] = h; h 219 
drivers/scsi/ncr53c8xx.c struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1]; h 228 drivers/scsi/ncr53c8xx.c m_link_s *h = mp->h; h 239 drivers/scsi/ncr53c8xx.c while (!h[j].next) { h 241 drivers/scsi/ncr53c8xx.c h[j].next = (m_link_s *)mp->getp(mp); h 242 drivers/scsi/ncr53c8xx.c if (h[j].next) h 243 drivers/scsi/ncr53c8xx.c h[j].next->next = NULL; h 249 drivers/scsi/ncr53c8xx.c a = (m_addr_t) h[j].next; h 251 drivers/scsi/ncr53c8xx.c h[j].next = h[j].next->next; h 255 drivers/scsi/ncr53c8xx.c h[j].next = (m_link_s *) (a+s); h 256 drivers/scsi/ncr53c8xx.c h[j].next->next = NULL; h 271 drivers/scsi/ncr53c8xx.c m_link_s *h = mp->h; h 295 drivers/scsi/ncr53c8xx.c q = &h[i]; h 300 drivers/scsi/ncr53c8xx.c ((m_link_s *) a)->next = h[i].next; h 301 drivers/scsi/ncr53c8xx.c h[i].next = (m_link_s *) a; h 789 drivers/scsi/ncr53c8xx.c int c, h, t, u, v; h 793 drivers/scsi/ncr53c8xx.c h = -1; h 800 drivers/scsi/ncr53c8xx.c ++h; h 814 drivers/scsi/ncr53c8xx.c if (h == unit && h 151 drivers/scsi/nsp32_io.h unsigned long h,l; h 155 drivers/scsi/nsp32_io.h h = inw(base + DATA_REG_HI ); h 157 drivers/scsi/nsp32_io.h return ((h << 16) | l); h 164 drivers/scsi/nsp32_io.h unsigned long h,l; h 166 drivers/scsi/nsp32_io.h h = (val & 0xffff0000) >> 16; h 171 drivers/scsi/nsp32_io.h outw(h, base + DATA_REG_HI ); h 694 drivers/scsi/pcmcia/nsp_cs.c unsigned int l, m, h, dummy; h 700 drivers/scsi/pcmcia/nsp_cs.c h = nsp_index_read(base, TRANSFERCOUNT); h 703 drivers/scsi/pcmcia/nsp_cs.c count = (h << 16) | (m << 8) | (l << 0); h 297 drivers/scsi/pcmcia/nsp_cs.h static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); h 267 drivers/scsi/ppa.c unsigned char h; h 270 drivers/scsi/ppa.c h = r_str(base) & 0xf0; h 272 drivers/scsi/ppa.c *buffer++ = h | ((r_str(base) & 0xf0) >> 4); h 674 drivers/scsi/ppa.c unsigned char l = 0, h = 0; h 777 drivers/scsi/ppa.c ppa_in(dev, &h, 1); h 779 drivers/scsi/ppa.c (DID_OK << 16) + (h << 8) + (l & STATUS_MASK); h 202 drivers/scsi/qla2xxx/qla_init.c int rc, h; h 226 drivers/scsi/qla2xxx/qla_init.c for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h 227 drivers/scsi/qla2xxx/qla_init.c h++) { h 228 drivers/scsi/qla2xxx/qla_init.c if (sp->qpair->req->outstanding_cmds[h] == h 230 drivers/scsi/qla2xxx/qla_init.c sp->qpair->req->outstanding_cmds[h] = h 249 drivers/scsi/qla2xxx/qla_init.c for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h 250 drivers/scsi/qla2xxx/qla_init.c h++) { h 251 drivers/scsi/qla2xxx/qla_init.c if (sp->qpair->req->outstanding_cmds[h] == h 253 drivers/scsi/qla2xxx/qla_init.c sp->qpair->req->outstanding_cmds[h] = h 974 drivers/scsi/qla2xxx/qla_init.c struct list_head h; h 1014 drivers/scsi/qla2xxx/qla_init.c INIT_LIST_HEAD(&h); h 1017 drivers/scsi/qla2xxx/qla_init.c list_splice_init(&vha->gnl.fcports, &h); h 1020 drivers/scsi/qla2xxx/qla_init.c list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { h 274 drivers/scsi/qla2xxx/qla_inline.h struct qla_qpair_hint *h; h 276 drivers/scsi/qla2xxx/qla_inline.h list_for_each_entry(h, &qpair->hints_list, hint_elem) h 277 drivers/scsi/qla2xxx/qla_inline.h h->cpuid = qpair->cpuid; h 284 drivers/scsi/qla2xxx/qla_inline.h struct qla_qpair_hint *h; h 288 drivers/scsi/qla2xxx/qla_inline.h h = &tgt->qphints[i]; h 289 drivers/scsi/qla2xxx/qla_inline.h if (h->qpair == qpair) h 290 drivers/scsi/qla2xxx/qla_inline.h return h; h 1030 drivers/scsi/qla2xxx/qla_nx.h qla82xx_md_entry_hdr_t h; h 1057 drivers/scsi/qla2xxx/qla_nx.h qla82xx_md_entry_hdr_t h; h 1087 drivers/scsi/qla2xxx/qla_nx.h qla82xx_md_entry_hdr_t 
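The nsp32_io.h entries assemble a 32-bit register value from two 16-bit port reads and split it back for writes. A standalone sketch of the same halving, assuming a hypothetical two-port register pair in place of inw()/outw() on DATA_REG_LOW/DATA_REG_HI:

    #include <stdint.h>
    #include <assert.h>

    /* Hypothetical stand-ins for inw()/outw() on a low/high port pair. */
    static uint16_t regs[2];
    static uint16_t inw16(int idx)              { return regs[idx]; }
    static void     outw16(int idx, uint16_t v) { regs[idx] = v; }

    static uint32_t read_reg32(void)
    {
            uint32_t l = inw16(0);              /* low 16 bits first */
            uint32_t h = inw16(1);              /* then high 16 bits */
            return (h << 16) | l;
    }

    static void write_reg32(uint32_t val)
    {
            outw16(0, val & 0xffff);
            outw16(1, (val >> 16) & 0xffff);
    }

    int main(void)
    {
            write_reg32(0xdeadbeef);
            assert(read_reg32() == 0xdeadbeef);
            return 0;
    }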
h; h 1105 drivers/scsi/qla2xxx/qla_nx.h qla82xx_md_entry_hdr_t h; h 1115 drivers/scsi/qla2xxx/qla_nx.h qla82xx_md_entry_hdr_t h; h 1122 drivers/scsi/qla2xxx/qla_nx.h qla82xx_md_entry_hdr_t h; h 1136 drivers/scsi/qla2xxx/qla_nx.h qla82xx_md_entry_hdr_t h; h 300 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 323 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 347 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 360 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 368 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 380 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 388 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 401 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 421 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 434 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 449 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 465 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 479 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 494 drivers/scsi/qla2xxx/qla_nx2.h struct qla8044_minidump_entry_hdr h; h 1569 drivers/scsi/qla2xxx/qla_sup.c #define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r)) h 1577 drivers/scsi/qla2xxx/qla_target.c struct qla_qpair_hint *h; h 1589 drivers/scsi/qla2xxx/qla_target.c h = &tgt->qphints[i]; h 1590 drivers/scsi/qla2xxx/qla_target.c if (h->qpair) { h 1591 drivers/scsi/qla2xxx/qla_target.c spin_lock_irqsave(h->qpair->qp_lock_ptr, flags); h 1592 drivers/scsi/qla2xxx/qla_target.c list_del(&h->hint_elem); h 1593 drivers/scsi/qla2xxx/qla_target.c spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags); h 1594 drivers/scsi/qla2xxx/qla_target.c h->qpair = NULL; h 1720 drivers/scsi/qla2xxx/qla_target.c uint32_t f_ctl, h; h 1741 drivers/scsi/qla2xxx/qla_target.c h = qlt_make_handle(qpair); h 1742 drivers/scsi/qla2xxx/qla_target.c if (unlikely(h == QLA_TGT_NULL_HANDLE)) { h 1750 drivers/scsi/qla2xxx/qla_target.c qpair->req->outstanding_cmds[h] = (srb_t *)mcmd; h 1753 drivers/scsi/qla2xxx/qla_target.c resp->handle = MAKE_HANDLE(qpair->req->id, h); h 2001 drivers/scsi/qla2xxx/qla_target.c struct qla_qpair_hint *h = NULL; h 2004 drivers/scsi/qla2xxx/qla_target.c h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun); h 2005 drivers/scsi/qla2xxx/qla_target.c if (!h) h 2006 drivers/scsi/qla2xxx/qla_target.c h = &tgt->qphints[0]; h 2008 drivers/scsi/qla2xxx/qla_target.c h = &tgt->qphints[0]; h 2011 drivers/scsi/qla2xxx/qla_target.c return h; h 2073 drivers/scsi/qla2xxx/qla_target.c struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; h 2092 drivers/scsi/qla2xxx/qla_target.c mcmd->qpair = h->qpair; h 2099 drivers/scsi/qla2xxx/qla_target.c mcmd->se_cmd.cpuid = h->cpuid; h 2514 drivers/scsi/qla2xxx/qla_target.c uint32_t h; h 2519 drivers/scsi/qla2xxx/qla_target.c h = req->current_outstanding_cmd; h 2522 drivers/scsi/qla2xxx/qla_target.c h++; h 2523 drivers/scsi/qla2xxx/qla_target.c if (h == req->num_outstanding_cmds) h 2524 drivers/scsi/qla2xxx/qla_target.c h = 1; h 2526 drivers/scsi/qla2xxx/qla_target.c if (h == QLA_TGT_SKIP_HANDLE) h 2529 drivers/scsi/qla2xxx/qla_target.c if (!req->outstanding_cmds[h]) { h 2536 drivers/scsi/qla2xxx/qla_target.c req->current_outstanding_cmd = h; h 2541 drivers/scsi/qla2xxx/qla_target.c h = QLA_TGT_NULL_HANDLE; h 2544 
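qlt_make_handle() in the qla_target.c entries above hands out request handles by scanning a ring of outstanding_cmds slots, resuming just past the last handle issued, wrapping back to 1 (slot 0 doubles as the failure sentinel) and skipping one reserved handle. A self-contained sketch of that allocator; the ring size and reserved slot value are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_CMDS     8          /* hypothetical ring size */
    #define NULL_HANDLE  0          /* "table full" sentinel */
    #define SKIP_HANDLE  7          /* hypothetical reserved slot */

    static void *outstanding[NUM_CMDS];
    static uint32_t current_cmd = 1;

    /* Find a free slot, starting just after the last one handed out.
     * Slot 0 is never allocated, so 0 can serve as the failure value. */
    static uint32_t make_handle(void *cmd)
    {
            uint32_t h = current_cmd;
            for (int tries = 0; tries < NUM_CMDS; tries++) {
                    h++;
                    if (h == NUM_CMDS)
                            h = 1;          /* wrap, skipping slot 0 */
                    if (h == SKIP_HANDLE)
                            continue;       /* reserved, never allocate */
                    if (!outstanding[h]) {
                            outstanding[h] = cmd;
                            current_cmd = h;
                            return h;
                    }
            }
            return NULL_HANDLE;             /* every usable slot is busy */
    }

    int main(void)
    {
            int dummy;
            for (int i = 0; i < NUM_CMDS; i++)
                    printf("handle %u\n", make_handle(&dummy));
            return 0;
    }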
drivers/scsi/qla2xxx/qla_target.c return h; h 2551 drivers/scsi/qla2xxx/qla_target.c uint32_t h; h 2564 drivers/scsi/qla2xxx/qla_target.c h = qlt_make_handle(qpair); h 2565 drivers/scsi/qla2xxx/qla_target.c if (unlikely(h == QLA_TGT_NULL_HANDLE)) { h 2573 drivers/scsi/qla2xxx/qla_target.c qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; h 2575 drivers/scsi/qla2xxx/qla_target.c pkt->handle = MAKE_HANDLE(qpair->req->id, h); h 2998 drivers/scsi/qla2xxx/qla_target.c uint32_t h; h 3077 drivers/scsi/qla2xxx/qla_target.c h = qlt_make_handle(qpair); h 3078 drivers/scsi/qla2xxx/qla_target.c if (unlikely(h == QLA_TGT_NULL_HANDLE)) { h 3086 drivers/scsi/qla2xxx/qla_target.c qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; h 3088 drivers/scsi/qla2xxx/qla_target.c pkt->handle = MAKE_HANDLE(qpair->req->id, h); h 3186 drivers/scsi/qla2xxx/qla_target.c qpair->req->outstanding_cmds[h] = NULL; h 3854 drivers/scsi/qla2xxx/qla_target.c uint32_t h = handle & ~QLA_TGT_HANDLE_MASK; h 3856 drivers/scsi/qla2xxx/qla_target.c if (unlikely(h == QLA_TGT_SKIP_HANDLE)) h 3870 drivers/scsi/qla2xxx/qla_target.c h &= QLA_CMD_HANDLE_MASK; h 3872 drivers/scsi/qla2xxx/qla_target.c if (h != QLA_TGT_NULL_HANDLE) { h 3873 drivers/scsi/qla2xxx/qla_target.c if (unlikely(h >= req->num_outstanding_cmds)) { h 3880 drivers/scsi/qla2xxx/qla_target.c cmd = (void *) req->outstanding_cmds[h]; h 3887 drivers/scsi/qla2xxx/qla_target.c req->outstanding_cmds[h] = NULL; h 4192 drivers/scsi/qla2xxx/qla_target.c struct qla_qpair_hint *h; h 4195 drivers/scsi/qla2xxx/qla_target.c h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun); h 4196 drivers/scsi/qla2xxx/qla_target.c if (unlikely(!h)) { h 4205 drivers/scsi/qla2xxx/qla_target.c h = qla_qpair_to_hint(tgt, qpair); h 4206 drivers/scsi/qla2xxx/qla_target.c BUG_ON(!h); h 4208 drivers/scsi/qla2xxx/qla_target.c cmd->unpacked_lun, h, GFP_ATOMIC); h 4220 drivers/scsi/qla2xxx/qla_target.c h = NULL; h 4225 drivers/scsi/qla2xxx/qla_target.c h = qla_qpair_to_hint(tgt, qp); h 4226 drivers/scsi/qla2xxx/qla_target.c BUG_ON(!h); h 4228 drivers/scsi/qla2xxx/qla_target.c cmd->unpacked_lun, h, GFP_ATOMIC); h 4247 drivers/scsi/qla2xxx/qla_target.c h = qla_qpair_to_hint(tgt, qpair); h 4248 drivers/scsi/qla2xxx/qla_target.c BUG_ON(!h); h 4250 drivers/scsi/qla2xxx/qla_target.c cmd->unpacked_lun, h, GFP_ATOMIC); h 4259 drivers/scsi/qla2xxx/qla_target.c h = &tgt->qphints[0]; h 4262 drivers/scsi/qla2xxx/qla_target.c cmd->qpair = h->qpair; h 4263 drivers/scsi/qla2xxx/qla_target.c cmd->se_cmd.cpuid = h->cpuid; h 4389 drivers/scsi/qla2xxx/qla_target.c struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; h 4409 drivers/scsi/qla2xxx/qla_target.c mcmd->qpair = h->qpair; h 4411 drivers/scsi/qla2xxx/qla_target.c mcmd->se_cmd.cpuid = h->cpuid; h 4421 drivers/scsi/qla2xxx/qla_target.c h = qlt_find_qphint(vha, mcmd->unpacked_lun); h 4422 drivers/scsi/qla2xxx/qla_target.c mcmd->qpair = h->qpair; h 4423 drivers/scsi/qla2xxx/qla_target.c mcmd->se_cmd.cpuid = h->cpuid; h 5701 drivers/scsi/qla2xxx/qla_target.c u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK; h 5706 drivers/scsi/qla2xxx/qla_target.c if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) { h 6330 drivers/scsi/qla2xxx/qla_target.c struct qla_qpair_hint *h; h 6374 drivers/scsi/qla2xxx/qla_target.c h = &tgt->qphints[0]; h 6375 drivers/scsi/qla2xxx/qla_target.c h->qpair = ha->base_qpair; h 6376 drivers/scsi/qla2xxx/qla_target.c INIT_LIST_HEAD(&h->hint_elem); h 6377 drivers/scsi/qla2xxx/qla_target.c h->cpuid = ha->base_qpair->cpuid; h 6378 
drivers/scsi/qla2xxx/qla_target.c list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list); h 6385 drivers/scsi/qla2xxx/qla_target.c h = &tgt->qphints[i + 1]; h 6386 drivers/scsi/qla2xxx/qla_target.c INIT_LIST_HEAD(&h->hint_elem); h 6388 drivers/scsi/qla2xxx/qla_target.c h->qpair = qpair; h 6390 drivers/scsi/qla2xxx/qla_target.c list_add_tail(&h->hint_elem, &qpair->hints_list); h 6392 drivers/scsi/qla2xxx/qla_target.c h->cpuid = qpair->cpuid; h 61 drivers/scsi/qla2xxx/qla_target.h #define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK) h 245 drivers/scsi/qla4xxx/ql4_83xx.h struct qla8xxx_minidump_entry_hdr h; h 258 drivers/scsi/qla4xxx/ql4_83xx.h struct qla8xxx_minidump_entry_hdr h; h 273 drivers/scsi/qla4xxx/ql4_83xx.h struct qla8xxx_minidump_entry_hdr h; h 289 drivers/scsi/qla4xxx/ql4_83xx.h struct qla8xxx_minidump_entry_hdr h; h 303 drivers/scsi/qla4xxx/ql4_83xx.h struct qla8xxx_minidump_entry_hdr h; h 318 drivers/scsi/qla4xxx/ql4_83xx.h struct qla8xxx_minidump_entry_hdr h; h 349 drivers/scsi/qla4xxx/ql4_83xx.h struct qla8xxx_minidump_entry_hdr h; h 904 drivers/scsi/qla4xxx/ql4_nx.h struct qla8xxx_minidump_entry_hdr h; h 927 drivers/scsi/qla4xxx/ql4_nx.h struct qla8xxx_minidump_entry_hdr h; h 951 drivers/scsi/qla4xxx/ql4_nx.h struct qla8xxx_minidump_entry_hdr h; h 964 drivers/scsi/qla4xxx/ql4_nx.h struct qla8xxx_minidump_entry_hdr h; h 972 drivers/scsi/qla4xxx/ql4_nx.h struct qla8xxx_minidump_entry_hdr h; h 980 drivers/scsi/qla4xxx/ql4_nx.h struct qla8xxx_minidump_entry_hdr h; h 993 drivers/scsi/qla4xxx/ql4_nx.h struct qla8xxx_minidump_entry_hdr h; h 159 drivers/scsi/qla4xxx/ql4_os.c static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); h 107 drivers/scsi/qlogicfas408.h int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd); h 173 drivers/scsi/scsi_priv.h static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; } h 174 drivers/scsi/scsi_priv.h static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} h 1027 drivers/scsi/stex.c struct handshake_frame *h; h 1059 drivers/scsi/stex.c h = (struct handshake_frame *)hba->status_buffer; h 1060 drivers/scsi/stex.c h->rb_phy = cpu_to_le64(hba->dma_handle); h 1061 drivers/scsi/stex.c h->req_sz = cpu_to_le16(hba->rq_size); h 1062 drivers/scsi/stex.c h->req_cnt = cpu_to_le16(hba->rq_count+1); h 1063 drivers/scsi/stex.c h->status_sz = cpu_to_le16(sizeof(struct status_msg)); h 1064 drivers/scsi/stex.c h->status_cnt = cpu_to_le16(hba->sts_count+1); h 1065 drivers/scsi/stex.c h->hosttime = cpu_to_le64(ktime_get_real_seconds()); h 1066 drivers/scsi/stex.c h->partner_type = HMU_PARTNER_TYPE; h 1068 drivers/scsi/stex.c h->extra_offset = cpu_to_le32(hba->extra_offset); h 1069 drivers/scsi/stex.c h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); h 1071 drivers/scsi/stex.c h->extra_offset = h->extra_size = 0; h 1112 drivers/scsi/stex.c struct handshake_frame *h; h 1150 drivers/scsi/stex.c h = (struct handshake_frame *)(msg_h + 1); h 1151 drivers/scsi/stex.c h->rb_phy = cpu_to_le64(hba->dma_handle); h 1152 drivers/scsi/stex.c h->req_sz = cpu_to_le16(hba->rq_size); h 1153 drivers/scsi/stex.c h->req_cnt = cpu_to_le16(hba->rq_count+1); h 1154 drivers/scsi/stex.c h->status_sz = cpu_to_le16(sizeof(struct status_msg)); h 1155 drivers/scsi/stex.c h->status_cnt = cpu_to_le16(hba->sts_count+1); h 1156 drivers/scsi/stex.c h->hosttime = cpu_to_le64(ktime_get_real_seconds()); h 1157 drivers/scsi/stex.c h->partner_type = HMU_PARTNER_TYPE; h 1158 drivers/scsi/stex.c h->extra_offset = 
h->extra_size = 0; h 1160 drivers/scsi/stex.c h->scratch_size = cpu_to_le32(scratch_size); h 1302 drivers/scsi/sym53c8xx_2/sym_hipd.c int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s) h 1311 drivers/scsi/sym53c8xx_2/sym_hipd.c if (h == np->dmap_bah[i]) h 1326 drivers/scsi/sym53c8xx_2/sym_hipd.c np->dmap_bah[s] = h; h 1087 drivers/scsi/sym53c8xx_2/sym_hipd.h int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s); h 1091 drivers/scsi/sym53c8xx_2/sym_hipd.h u32 h = (badd>>32); h 1092 drivers/scsi/sym53c8xx_2/sym_hipd.h int s = (h&SYM_DMAP_MASK); h 1094 drivers/scsi/sym53c8xx_2/sym_hipd.h if (h != np->dmap_bah[s]) h 1101 drivers/scsi/sym53c8xx_2/sym_hipd.h s = sym_lookup_dmap(np, h, s); h 1161 drivers/scsi/sym53c8xx_2/sym_hipd.h struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1]; h 53 drivers/scsi/sym53c8xx_2/sym_malloc.c m_link_p h = mp->h; h 64 drivers/scsi/sym53c8xx_2/sym_malloc.c while (!h[j].next) { h 66 drivers/scsi/sym53c8xx_2/sym_malloc.c h[j].next = (m_link_p) M_GET_MEM_CLUSTER(); h 67 drivers/scsi/sym53c8xx_2/sym_malloc.c if (h[j].next) h 68 drivers/scsi/sym53c8xx_2/sym_malloc.c h[j].next->next = NULL; h 74 drivers/scsi/sym53c8xx_2/sym_malloc.c a = h[j].next; h 76 drivers/scsi/sym53c8xx_2/sym_malloc.c h[j].next = h[j].next->next; h 80 drivers/scsi/sym53c8xx_2/sym_malloc.c h[j].next = (m_link_p) (a+s); h 81 drivers/scsi/sym53c8xx_2/sym_malloc.c h[j].next->next = NULL; h 99 drivers/scsi/sym53c8xx_2/sym_malloc.c m_link_p h = mp->h; h 120 drivers/scsi/sym53c8xx_2/sym_malloc.c ((m_link_p) a)->next = h[i].next; h 121 drivers/scsi/sym53c8xx_2/sym_malloc.c h[i].next = (m_link_p) a; h 126 drivers/scsi/sym53c8xx_2/sym_malloc.c q = &h[i]; h 131 drivers/scsi/sym53c8xx_2/sym_malloc.c ((m_link_p) a)->next = h[i].next; h 132 drivers/scsi/sym53c8xx_2/sym_malloc.c h[i].next = (m_link_p) a; h 161 drivers/scsi/ufs/ufshcd.c #define ufshcd_set_eh_in_progress(h) \ h 162 drivers/scsi/ufs/ufshcd.c ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS) h 163 drivers/scsi/ufs/ufshcd.c #define ufshcd_eh_in_progress(h) \ h 164 drivers/scsi/ufs/ufshcd.c ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS) h 165 drivers/scsi/ufs/ufshcd.c #define ufshcd_clear_eh_in_progress(h) \ h 166 drivers/scsi/ufs/ufshcd.c ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) h 168 drivers/scsi/ufs/ufshcd.c #define ufshcd_set_ufs_dev_active(h) \ h 169 drivers/scsi/ufs/ufshcd.c ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE) h 170 drivers/scsi/ufs/ufshcd.c #define ufshcd_set_ufs_dev_sleep(h) \ h 171 drivers/scsi/ufs/ufshcd.c ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE) h 172 drivers/scsi/ufs/ufshcd.c #define ufshcd_set_ufs_dev_poweroff(h) \ h 173 drivers/scsi/ufs/ufshcd.c ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE) h 174 drivers/scsi/ufs/ufshcd.c #define ufshcd_is_ufs_dev_active(h) \ h 175 drivers/scsi/ufs/ufshcd.c ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE) h 176 drivers/scsi/ufs/ufshcd.c #define ufshcd_is_ufs_dev_sleep(h) \ h 177 drivers/scsi/ufs/ufshcd.c ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE) h 178 drivers/scsi/ufs/ufshcd.c #define ufshcd_is_ufs_dev_poweroff(h) \ h 179 drivers/scsi/ufs/ufshcd.c ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE) h 336 drivers/scsi/wd33c93.h int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd); h 75 drivers/sh/intc/access.c static unsigned long test_8(unsigned long addr, unsigned long h, h 79 drivers/sh/intc/access.c return intc_get_field_from_handle(__raw_readb(ptr), h); h 82 drivers/sh/intc/access.c static unsigned long test_16(unsigned long addr, unsigned long h, h 86 
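sym_lookup_dmap() in the sym_hipd entries maps the high 32 bits of a 64-bit bus address to a small segment table, so the chip can keep working with 32-bit pointers plus a segment index. A toy version of the hash-then-scan lookup; the table size and the "0 means unused" convention are simplifications, not the real driver's rules:

    #include <stdint.h>
    #include <stdio.h>

    #define DMAP_SIZE 16                    /* hypothetical segment count */
    #define DMAP_MASK (DMAP_SIZE - 1)

    static uint32_t dmap_bah[DMAP_SIZE];    /* high 32 bits per segment */

    /* Return a segment index whose entry holds the high half of the bus
     * address: try the direct hash first, then scan, then claim a free
     * entry (0 stands for "unused" in this toy). */
    static int lookup_dmap(uint32_t h)
    {
            int s = h & DMAP_MASK;          /* fast path: direct hash */
            if (dmap_bah[s] == h)
                    return s;
            for (s = 0; s < DMAP_SIZE; s++) /* slow path: full scan */
                    if (dmap_bah[s] == h)
                            return s;
            for (s = 0; s < DMAP_SIZE; s++) /* claim an unused entry */
                    if (dmap_bah[s] == 0) {
                            dmap_bah[s] = h;
                            return s;
                    }
            return -1;                      /* table full */
    }

    int main(void)
    {
            uint64_t badd = 0x000000012f000000ull;
            uint32_t h = badd >> 32;
            printf("segment %d\n", lookup_dmap(h));
            return 0;
    }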
drivers/sh/intc/access.c return intc_get_field_from_handle(__raw_readw(ptr), h); h 89 drivers/sh/intc/access.c static unsigned long test_32(unsigned long addr, unsigned long h, h 93 drivers/sh/intc/access.c return intc_get_field_from_handle(__raw_readl(ptr), h); h 96 drivers/sh/intc/access.c static unsigned long write_8(unsigned long addr, unsigned long h, h 100 drivers/sh/intc/access.c __raw_writeb(intc_set_field_from_handle(0, data, h), ptr); h 105 drivers/sh/intc/access.c static unsigned long write_16(unsigned long addr, unsigned long h, h 109 drivers/sh/intc/access.c __raw_writew(intc_set_field_from_handle(0, data, h), ptr); h 114 drivers/sh/intc/access.c static unsigned long write_32(unsigned long addr, unsigned long h, h 118 drivers/sh/intc/access.c __raw_writel(intc_set_field_from_handle(0, data, h), ptr); h 123 drivers/sh/intc/access.c static unsigned long modify_8(unsigned long addr, unsigned long h, h 130 drivers/sh/intc/access.c value = intc_set_field_from_handle(__raw_readb(ptr), data, h); h 137 drivers/sh/intc/access.c static unsigned long modify_16(unsigned long addr, unsigned long h, h 144 drivers/sh/intc/access.c value = intc_set_field_from_handle(__raw_readw(ptr), data, h); h 151 drivers/sh/intc/access.c static unsigned long modify_32(unsigned long addr, unsigned long h, h 158 drivers/sh/intc/access.c value = intc_set_field_from_handle(__raw_readl(ptr), data, h); h 196 drivers/sh/intc/access.c unsigned long h, h 15 drivers/sh/intc/internals.h #define _INTC_SHIFT(h) (h & 0x1f) h 16 drivers/sh/intc/internals.h #define _INTC_WIDTH(h) ((h >> 5) & 0xf) h 17 drivers/sh/intc/internals.h #define _INTC_FN(h) ((h >> 9) & 0xf) h 18 drivers/sh/intc/internals.h #define _INTC_MODE(h) ((h >> 13) & 0x7) h 19 drivers/sh/intc/internals.h #define _INTC_ADDR_E(h) ((h >> 16) & 0xff) h 20 drivers/sh/intc/internals.h #define _INTC_ADDR_D(h) ((h >> 24) & 0xff) h 116 drivers/sh/intc/internals.h (*intc_reg_fns[])(unsigned long addr, unsigned long h, unsigned long data); h 387 drivers/soc/fsl/qbman/qman_test_stash.c static int init_handler(void *h) h 390 drivers/soc/fsl/qbman/qman_test_stash.c struct hp_handler *handler = h; h 433 drivers/soc/fsl/qbman/qman_test_stash.c static void init_handler_cb(void *h) h 435 drivers/soc/fsl/qbman/qman_test_stash.c if (init_handler(h)) h 244 drivers/soc/fsl/qe/qe_ic.c static int qe_ic_host_match(struct irq_domain *h, struct device_node *node, h 248 drivers/soc/fsl/qe/qe_ic.c struct device_node *of_node = irq_domain_get_of_node(h); h 252 drivers/soc/fsl/qe/qe_ic.c static int qe_ic_host_map(struct irq_domain *h, unsigned int virq, h 255 drivers/soc/fsl/qe/qe_ic.c struct qe_ic *qe_ic = h->host_data; h 44 drivers/soc/qcom/trace-rpmh.h TP_PROTO(struct rsc_drv *d, int m, int n, u32 h, h 47 drivers/soc/qcom/trace-rpmh.h TP_ARGS(d, m, n, h, c), h 63 drivers/soc/qcom/trace-rpmh.h __entry->hdr = h; h 812 drivers/staging/fieldbus/anybuss/host.c struct anybus_mbox_hdr *h; h 819 drivers/staging/fieldbus/anybuss/host.c if (ext && ext_sz > sizeof(h->extended)) h 825 drivers/staging/fieldbus/anybuss/host.c h = &pd->hdr; h 831 drivers/staging/fieldbus/anybuss/host.c memset(h, 0, sizeof(*h)); h 832 drivers/staging/fieldbus/anybuss/host.c h->info = cpu_to_be16(info | INFO_COMMAND); h 833 drivers/staging/fieldbus/anybuss/host.c h->cmd_num = cpu_to_be16(cmd_num); h 834 drivers/staging/fieldbus/anybuss/host.c h->data_size = cpu_to_be16(msg_out_sz); h 835 drivers/staging/fieldbus/anybuss/host.c h->frame_count = cpu_to_be16(1); h 836 drivers/staging/fieldbus/anybuss/host.c h->frame_num 
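The _INTC_* macros above pack a whole register description (shift, width, function, mode, register addresses) into one unsigned long handle. A reduced sketch of the same encode/extract/insert scheme, keeping only the shift and width fields and mirroring what intc_get_field_from_handle()/intc_set_field_from_handle() do with the real handles:

    #include <stdint.h>
    #include <assert.h>

    /* Field layout mirroring the _INTC_* macros in the entries above:
     * bits 0-4 hold the shift, bits 5-8 hold the field width. */
    #define H_SHIFT(h)  ((h) & 0x1f)
    #define H_WIDTH(h)  (((h) >> 5) & 0xf)

    static unsigned long make_handle(unsigned shift, unsigned width)
    {
            return ((unsigned long)width << 5) | shift;
    }

    /* Pull a field out of a raw register value. */
    static unsigned long get_field(unsigned long value, unsigned long h)
    {
            unsigned long mask = (1ul << H_WIDTH(h)) - 1;
            return (value >> H_SHIFT(h)) & mask;
    }

    /* Insert a field into a raw register value. */
    static unsigned long set_field(unsigned long value, unsigned long data,
                                   unsigned long h)
    {
            unsigned long mask = (1ul << H_WIDTH(h)) - 1;
            value &= ~(mask << H_SHIFT(h));
            return value | ((data & mask) << H_SHIFT(h));
    }

    int main(void)
    {
            unsigned long h = make_handle(8, 4);    /* 4-bit field at bit 8 */
            unsigned long reg = set_field(0, 0xA, h);
            assert(get_field(reg, h) == 0xA);
            return 0;
    }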
= cpu_to_be16(1); h 837 drivers/staging/fieldbus/anybuss/host.c h->offset_high = cpu_to_be16(0); h 838 drivers/staging/fieldbus/anybuss/host.c h->offset_low = cpu_to_be16(0); h 840 drivers/staging/fieldbus/anybuss/host.c memcpy(h->extended, ext, ext_sz); h 31 drivers/staging/media/hantro/hantro.h #define VP8_MB_HEIGHT(h) DIV_ROUND_UP(h, VP8_MB_DIM) h 35 drivers/staging/media/hantro/hantro.h #define H264_MB_HEIGHT(h) DIV_ROUND_UP(h, H264_MB_DIM) h 39 drivers/staging/media/hantro/hantro.h #define MPEG2_MB_HEIGHT(h) DIV_ROUND_UP(h, MPEG2_MB_DIM) h 43 drivers/staging/media/hantro/hantro.h #define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM) h 39 drivers/staging/media/hantro/hantro_h1_regs.h #define H1_REG_ENC_CTRL_HEIGHT(h) ((h) << 10) h 517 drivers/staging/media/ipu3/ipu3-css-params.c u32 h = reso.pin_height[IMGU_ABI_OSYS_PIN_VF] - h 523 drivers/staging/media/ipu3/ipu3-css-params.c roundclosest_down(h / 2, IMGU_OSYS_DMA_CROP_H_LIMIT); h 1266 drivers/staging/media/ipu3/ipu3-css.c unsigned int w, h; h 1288 drivers/staging/media/ipu3/ipu3-css.c h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height; h 1293 drivers/staging/media/ipu3/ipu3-css.c size = w * h * BYPC + (w / 2) * (h / 2) * BYPC * 2; h 1313 drivers/staging/media/ipu3/ipu3-css.c h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height; h 1314 drivers/staging/media/ipu3/ipu3-css.c size = w * ALIGN(h * 3 / 2 + 3, 2); /* +3 for vf_pp prefetch */ h 58 drivers/staging/media/ipu3/ipu3-css.h u32 h; h 13 drivers/staging/media/sunxi/cedrus/cedrus_regs.h #define SHIFT_AND_MASK_BITS(v, h, l) \ h 14 drivers/staging/media/sunxi/cedrus/cedrus_regs.h (((unsigned long)(v) << (l)) & GENMASK(h, l)) h 105 drivers/staging/media/sunxi/cedrus/cedrus_regs.h #define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \ h 106 drivers/staging/media/sunxi/cedrus/cedrus_regs.h SHIFT_AND_MASK_BITS(DIV_ROUND_UP((h), 16), 7, 0) h 111 drivers/staging/media/sunxi/cedrus/cedrus_regs.h #define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) SHIFT_AND_MASK_BITS(h, 11, 0) h 116 drivers/staging/media/sunxi/cedrus/cedrus_regs.h #define VE_DEC_MPEG_MBADDR_Y(h) SHIFT_AND_MASK_BITS(h, 7, 0) h 115 drivers/staging/sm750fb/sm750.h int h; h 61 drivers/staging/sm750fb/sm750_cursor.c void sm750_hw_cursor_setSize(struct lynx_cursor *cursor, int w, int h) h 64 drivers/staging/sm750fb/sm750_cursor.c cursor->h = h; h 97 drivers/staging/sm750fb/sm750_cursor.c count = pitch * cursor->h; h 147 drivers/staging/sm750fb/sm750_cursor.c count = pitch * cursor->h; h 9 drivers/staging/sm750fb/sm750_cursor.h int w, int h); h 316 drivers/staging/uwb/neh.c struct uwb_rc_neh *neh = NULL, *h; h 321 drivers/staging/uwb/neh.c list_for_each_entry(h, &rc->neh_list, list_node) { h 322 drivers/staging/uwb/neh.c if (uwb_rc_neh_match(h, rceb)) { h 323 drivers/staging/uwb/neh.c neh = h; h 364 drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h struct mmal_msg_header h; h 86 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c msg_type_names[(MSG)->h.type], \ h 87 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c (MSG)->h.type, (MSG_LEN)); \ h 100 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c msg_type_names[(MSG)->h.type], \ h 101 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c (MSG)->h.type, (MSG_LEN)); \ h 397 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST; h 398 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.magic = MMAL_MAGIC; h 399 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.context = msg_context->handle; h 400 
drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.status = 0; h 465 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) { h 467 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c pr_warn("error %d in reply\n", msg->h.status); h 469 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c msg_context->u.bulk.status = msg->h.status; h 565 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c switch (msg->h.type) { h 583 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (!msg->h.context) { h 590 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c msg->h.context); h 593 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c msg->h.context); h 664 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c msg->h.magic = MMAL_MAGIC; h 665 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c msg->h.context = msg_context->handle; h 666 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c msg->h.status = 0; h 776 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET; h 805 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) { h 833 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET; h 844 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) { h 929 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE; h 940 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != m.h.type) { h 975 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY; h 984 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != m.h.type) { h 1008 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE; h 1017 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != m.h.type) { h 1040 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE; h 1049 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != m.h.type) { h 1073 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_GET_VERSION; h 1081 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != m.h.type) { h 1106 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_PORT_ACTION; h 1119 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) { h 1150 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_PORT_ACTION; h 1166 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) { h 1195 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET; h 1209 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) { h 1236 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET; h 1250 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) { h 1252 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c pr_err("Incorrect reply type %d\n", rmsg->h.type); h 177 drivers/thermal/intel/x86_pkg_temp_thermal.c u32 l, h, mask, 
shift, intr; h 184 drivers/thermal/intel/x86_pkg_temp_thermal.c &l, &h); h 210 drivers/thermal/intel/x86_pkg_temp_thermal.c l, h); h 237 drivers/thermal/intel/x86_pkg_temp_thermal.c u32 l, h; h 239 drivers/thermal/intel/x86_pkg_temp_thermal.c rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); h 247 drivers/thermal/intel/x86_pkg_temp_thermal.c wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); h 253 drivers/thermal/intel/x86_pkg_temp_thermal.c u32 l, h; h 255 drivers/thermal/intel/x86_pkg_temp_thermal.c rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); h 258 drivers/thermal/intel/x86_pkg_temp_thermal.c wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); h 1195 drivers/thermal/tegra/soctherm.c static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq, h 1198 drivers/thermal/tegra/soctherm.c struct soctherm_oc_irq_chip_data *data = h->host_data; h 110 drivers/thunderbolt/path.c int ret, i, h; h 126 drivers/thunderbolt/path.c h = src_hopid; h 132 drivers/thunderbolt/path.c ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2); h 134 drivers/thunderbolt/path.c tb_port_warn(p, "failed to read path at %d\n", h); h 146 drivers/thunderbolt/path.c h = hop.next_hop; h 167 drivers/thunderbolt/path.c h = src_hopid; h 174 drivers/thunderbolt/path.c ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2); h 176 drivers/thunderbolt/path.c tb_port_warn(p, "failed to read path at %d\n", h); h 180 drivers/thunderbolt/path.c if (tb_port_alloc_in_hopid(p, h, h) < 0) h 187 drivers/thunderbolt/path.c tb_port_release_in_hopid(p, h); h 192 drivers/thunderbolt/path.c path->hops[i].in_hop_index = h; h 197 drivers/thunderbolt/path.c h = next_hop; h 3402 drivers/tty/cyclades.c const struct zfile_header *h = ptr; h 3407 drivers/tty/cyclades.c if (len < sizeof(*h)) { h 3408 drivers/tty/cyclades.c printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h)); h 3412 drivers/tty/cyclades.c cs = ptr + h->config_offset; h 3413 drivers/tty/cyclades.c bs = ptr + h->block_offset; h 3415 drivers/tty/cyclades.c if ((void *)(cs + h->n_config) > ptr + len || h 3416 drivers/tty/cyclades.c (void *)(bs + h->n_blocks) > ptr + len) { h 3421 drivers/tty/cyclades.c if (cyc_isfwstr(h->name, sizeof(h->name)) || h 3422 drivers/tty/cyclades.c cyc_isfwstr(h->date, sizeof(h->date))) { h 3427 drivers/tty/cyclades.c if (strncmp(name, h->name, sizeof(h->name))) { h 3428 drivers/tty/cyclades.c printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name); h 3433 drivers/tty/cyclades.c for (c = cs; c < cs + h->n_config; c++) { h 3435 drivers/tty/cyclades.c if (c->block_list[a] > h->n_blocks) { h 3447 drivers/tty/cyclades.c for (b = bs; b < bs + h->n_blocks; b++) h 3454 drivers/tty/cyclades.c for (c = cs; c < cs + h->n_config; c++) h 795 drivers/tty/n_gsm.c int h = dlci->adaption - 1; h 807 drivers/tty/n_gsm.c size = len + h; h 174 drivers/tty/serial/8250/8250_core.c struct hlist_head *h; h 181 drivers/tty/serial/8250/8250_core.c h = &irq_lists[up->port.irq % NR_IRQ_HASH]; h 183 drivers/tty/serial/8250/8250_core.c hlist_for_each(n, h) { h 197 drivers/tty/serial/8250/8250_core.c hlist_add_head(&i->node, h); h 229 drivers/tty/serial/8250/8250_core.c struct hlist_head *h; h 233 drivers/tty/serial/8250/8250_core.c h = &irq_lists[up->port.irq % NR_IRQ_HASH]; h 235 drivers/tty/serial/8250/8250_core.c hlist_for_each(n, h) { h 561 drivers/tty/serial/arc_uart.c unsigned int l, h, hw_val; h 568 drivers/tty/serial/arc_uart.c h = (hw_val >> 8) & 0xFF; h 571 drivers/tty/serial/arc_uart.c UART_SET_BAUDH(port, h); h 812 drivers/tty/vt/consolemap.c int h; h 837 
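The cyclades entries validate a firmware blob before trusting it: minimum length for the header, embedded tables bounded by the blob, terminated name strings, and a match against the expected image name. A standalone sketch of those checks; the header layout and 8-byte entry size here are hypothetical, not the zfile format itself:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Hypothetical on-disk layout: every offset and count must be
     * validated against the blob length before it is dereferenced. */
    struct fw_header {
            char     name[16];
            uint32_t table_offset;
            uint32_t n_entries;     /* entries assumed 8 bytes each */
    };

    static int fw_validate(const void *ptr, size_t len, const char *name)
    {
            const struct fw_header *h = ptr;

            if (len < sizeof(*h))
                    return -1;      /* too short to even hold a header */
            /* Overflow-safe bounds check on the entry table. */
            if (h->table_offset > len ||
                h->n_entries > (len - h->table_offset) / 8)
                    return -1;      /* table would fall outside the blob */
            if (memchr(h->name, '\0', sizeof(h->name)) == NULL)
                    return -1;      /* unterminated name string */
            if (strncmp(name, h->name, sizeof(h->name)))
                    return -1;      /* wrong firmware image */
            return 0;
    }

    int main(void)
    {
            struct fw_header h = { "testfw", 24, 0 };
            printf("%d\n", fw_validate(&h, sizeof(h), "testfw"));
            return 0;
    }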
drivers/tty/vt/consolemap.c (h = p2[ucs & 0x3f]) < MAX_GLYPH) h 838 drivers/tty/vt/consolemap.c return h; h 4543 drivers/tty/vt/vt.c int h, i; h 4555 drivers/tty/vt/vt.c for (h = 32; h > 0; h--) h 4557 drivers/tty/vt/vt.c if (charmap[32*i+h-1]) h 4564 drivers/tty/vt/vt.c op->height = h; h 1160 drivers/usb/class/cdc-acm.c struct usb_cdc_parsed_header h; h 1181 drivers/usb/class/cdc-acm.c memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header)); h 1219 drivers/usb/class/cdc-acm.c cdc_parse_cdc_header(&h, intf, buffer, buflen); h 1220 drivers/usb/class/cdc-acm.c union_header = h.usb_cdc_union_desc; h 1221 drivers/usb/class/cdc-acm.c cmgmd = h.usb_cdc_call_mgmt_descriptor; h 1350 drivers/usb/class/cdc-acm.c if (h.usb_cdc_acm_descriptor) h 1351 drivers/usb/class/cdc-acm.c acm->ctrl_caps = h.usb_cdc_acm_descriptor->bmCapabilities; h 1443 drivers/usb/class/cdc-acm.c if (h.usb_cdc_country_functional_desc) { /* export the country data */ h 1445 drivers/usb/class/cdc-acm.c h.usb_cdc_country_functional_desc; h 30 drivers/usb/core/config.c struct usb_descriptor_header *h; h 36 drivers/usb/core/config.c h = (struct usb_descriptor_header *) buffer; h 37 drivers/usb/core/config.c if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2) h 39 drivers/usb/core/config.c buffer += h->bLength; h 40 drivers/usb/core/config.c size -= h->bLength; h 2024 drivers/usb/gadget/function/f_fs.c struct usb_os_desc_header *h, void *data, h 2292 drivers/usb/gadget/function/f_fs.c struct usb_os_desc_header *h) h 2301 drivers/usb/gadget/function/f_fs.c ret = entity(type, h, data, len, priv); h 2378 drivers/usb/gadget/function/f_fs.c struct usb_os_desc_header *h, void *data, h 2416 drivers/usb/gadget/function/f_fs.c if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) h 2965 drivers/usb/gadget/function/f_fs.c struct usb_os_desc_header *h, void *data, h 2991 drivers/usb/gadget/function/f_fs.c t = &func->function.os_desc_table[h->interface]; h 2992 drivers/usb/gadget/function/f_fs.c t->if_id = func->interfaces_nums[h->interface]; h 225 drivers/usb/gadget/function/uvc_configfs.c struct uvcg_control_header *h; h 227 drivers/usb/gadget/function/uvc_configfs.c h = kzalloc(sizeof(*h), GFP_KERNEL); h 228 drivers/usb/gadget/function/uvc_configfs.c if (!h) h 231 drivers/usb/gadget/function/uvc_configfs.c h->desc.bLength = UVC_DT_HEADER_SIZE(1); h 232 drivers/usb/gadget/function/uvc_configfs.c h->desc.bDescriptorType = USB_DT_CS_INTERFACE; h 233 drivers/usb/gadget/function/uvc_configfs.c h->desc.bDescriptorSubType = UVC_VC_HEADER; h 234 drivers/usb/gadget/function/uvc_configfs.c h->desc.bcdUVC = cpu_to_le16(0x0100); h 235 drivers/usb/gadget/function/uvc_configfs.c h->desc.dwClockFrequency = cpu_to_le32(48000000); h 237 drivers/usb/gadget/function/uvc_configfs.c config_item_init_type_name(&h->item, name, &uvcg_control_header_type); h 239 drivers/usb/gadget/function/uvc_configfs.c return &h->item; h 1033 drivers/usb/gadget/function/uvc_configfs.c struct uvcg_streaming_header *h; h 1035 drivers/usb/gadget/function/uvc_configfs.c h = kzalloc(sizeof(*h), GFP_KERNEL); h 1036 drivers/usb/gadget/function/uvc_configfs.c if (!h) h 1039 drivers/usb/gadget/function/uvc_configfs.c INIT_LIST_HEAD(&h->formats); h 1040 drivers/usb/gadget/function/uvc_configfs.c h->desc.bDescriptorType = USB_DT_CS_INTERFACE; h 1041 drivers/usb/gadget/function/uvc_configfs.c h->desc.bDescriptorSubType = UVC_VS_INPUT_HEADER; h 1042 drivers/usb/gadget/function/uvc_configfs.c h->desc.bTerminalLink = 3; h 1043 drivers/usb/gadget/function/uvc_configfs.c 
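The usb/core/config.c lines walk a descriptor buffer by bLength until a wanted bDescriptorType appears. A standalone sketch in the spirit of that loop, with an explicit bounds guard added so a malformed length cannot run past the buffer:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct desc_header {            /* shaped like usb_descriptor_header */
            uint8_t bLength;
            uint8_t bDescriptorType;
    };

    /* Advance through the buffer descriptor by descriptor until one of
     * the wanted type turns up, or the buffer is exhausted. */
    static const void *find_desc(const uint8_t *buf, size_t size, uint8_t type)
    {
            while (size >= sizeof(struct desc_header)) {
                    const struct desc_header *h = (const void *)buf;
                    if (h->bLength < sizeof(*h) || h->bLength > size)
                            return NULL;    /* malformed: stop walking */
                    if (h->bDescriptorType == type)
                            return h;
                    buf  += h->bLength;
                    size -= h->bLength;
            }
            return NULL;
    }

    int main(void)
    {
            uint8_t cfg[] = { 2, 0x01,  3, 0x24, 0,  2, 0x05 };
            printf("found: %p\n", find_desc(cfg, sizeof(cfg), 0x05));
            return 0;
    }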
h->desc.bControlSize = UVCG_STREAMING_CONTROL_SIZE; h 1045 drivers/usb/gadget/function/uvc_configfs.c config_item_init_type_name(&h->item, name, &uvcg_streaming_header_type); h 1047 drivers/usb/gadget/function/uvc_configfs.c return &h->item; h 1349 drivers/usb/gadget/function/uvc_configfs.c struct uvcg_frame *h; h 1354 drivers/usb/gadget/function/uvc_configfs.c h = kzalloc(sizeof(*h), GFP_KERNEL); h 1355 drivers/usb/gadget/function/uvc_configfs.c if (!h) h 1358 drivers/usb/gadget/function/uvc_configfs.c h->frame.b_descriptor_type = USB_DT_CS_INTERFACE; h 1359 drivers/usb/gadget/function/uvc_configfs.c h->frame.b_frame_index = 1; h 1360 drivers/usb/gadget/function/uvc_configfs.c h->frame.w_width = 640; h 1361 drivers/usb/gadget/function/uvc_configfs.c h->frame.w_height = 360; h 1362 drivers/usb/gadget/function/uvc_configfs.c h->frame.dw_min_bit_rate = 18432000; h 1363 drivers/usb/gadget/function/uvc_configfs.c h->frame.dw_max_bit_rate = 55296000; h 1364 drivers/usb/gadget/function/uvc_configfs.c h->frame.dw_max_video_frame_buffer_size = 460800; h 1365 drivers/usb/gadget/function/uvc_configfs.c h->frame.dw_default_frame_interval = 666666; h 1373 drivers/usb/gadget/function/uvc_configfs.c h->frame.b_descriptor_subtype = UVC_VS_FRAME_UNCOMPRESSED; h 1374 drivers/usb/gadget/function/uvc_configfs.c h->fmt_type = UVCG_UNCOMPRESSED; h 1376 drivers/usb/gadget/function/uvc_configfs.c h->frame.b_descriptor_subtype = UVC_VS_FRAME_MJPEG; h 1377 drivers/usb/gadget/function/uvc_configfs.c h->fmt_type = UVCG_MJPEG; h 1380 drivers/usb/gadget/function/uvc_configfs.c kfree(h); h 1386 drivers/usb/gadget/function/uvc_configfs.c config_item_init_type_name(&h->item, name, &uvcg_frame_type); h 1388 drivers/usb/gadget/function/uvc_configfs.c return &h->item; h 1636 drivers/usb/gadget/function/uvc_configfs.c struct uvcg_uncompressed *h; h 1638 drivers/usb/gadget/function/uvc_configfs.c h = kzalloc(sizeof(*h), GFP_KERNEL); h 1639 drivers/usb/gadget/function/uvc_configfs.c if (!h) h 1642 drivers/usb/gadget/function/uvc_configfs.c h->desc.bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE; h 1643 drivers/usb/gadget/function/uvc_configfs.c h->desc.bDescriptorType = USB_DT_CS_INTERFACE; h 1644 drivers/usb/gadget/function/uvc_configfs.c h->desc.bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED; h 1645 drivers/usb/gadget/function/uvc_configfs.c memcpy(h->desc.guidFormat, guid, sizeof(guid)); h 1646 drivers/usb/gadget/function/uvc_configfs.c h->desc.bBitsPerPixel = 16; h 1647 drivers/usb/gadget/function/uvc_configfs.c h->desc.bDefaultFrameIndex = 1; h 1648 drivers/usb/gadget/function/uvc_configfs.c h->desc.bAspectRatioX = 0; h 1649 drivers/usb/gadget/function/uvc_configfs.c h->desc.bAspectRatioY = 0; h 1650 drivers/usb/gadget/function/uvc_configfs.c h->desc.bmInterfaceFlags = 0; h 1651 drivers/usb/gadget/function/uvc_configfs.c h->desc.bCopyProtect = 0; h 1653 drivers/usb/gadget/function/uvc_configfs.c h->fmt.type = UVCG_UNCOMPRESSED; h 1654 drivers/usb/gadget/function/uvc_configfs.c config_group_init_type_name(&h->fmt.group, name, h 1657 drivers/usb/gadget/function/uvc_configfs.c return &h->fmt.group; h 1824 drivers/usb/gadget/function/uvc_configfs.c struct uvcg_mjpeg *h; h 1826 drivers/usb/gadget/function/uvc_configfs.c h = kzalloc(sizeof(*h), GFP_KERNEL); h 1827 drivers/usb/gadget/function/uvc_configfs.c if (!h) h 1830 drivers/usb/gadget/function/uvc_configfs.c h->desc.bLength = UVC_DT_FORMAT_MJPEG_SIZE; h 1831 drivers/usb/gadget/function/uvc_configfs.c h->desc.bDescriptorType = USB_DT_CS_INTERFACE; h 1832 
drivers/usb/gadget/function/uvc_configfs.c h->desc.bDescriptorSubType = UVC_VS_FORMAT_MJPEG; h 1833 drivers/usb/gadget/function/uvc_configfs.c h->desc.bDefaultFrameIndex = 1; h 1834 drivers/usb/gadget/function/uvc_configfs.c h->desc.bAspectRatioX = 0; h 1835 drivers/usb/gadget/function/uvc_configfs.c h->desc.bAspectRatioY = 0; h 1836 drivers/usb/gadget/function/uvc_configfs.c h->desc.bmInterfaceFlags = 0; h 1837 drivers/usb/gadget/function/uvc_configfs.c h->desc.bCopyProtect = 0; h 1839 drivers/usb/gadget/function/uvc_configfs.c h->fmt.type = UVCG_MJPEG; h 1840 drivers/usb/gadget/function/uvc_configfs.c config_group_init_type_name(&h->fmt.group, name, h 1843 drivers/usb/gadget/function/uvc_configfs.c return &h->fmt.group; h 1982 drivers/usb/gadget/function/uvc_configfs.c static int __uvcg_iter_strm_cls(struct uvcg_streaming_header *h, h 1996 drivers/usb/gadget/function/uvc_configfs.c ret = fun(h, priv2, priv3, 0, UVCG_HEADER); h 1999 drivers/usb/gadget/function/uvc_configfs.c list_for_each_entry(f, &h->formats, entry) { h 2031 drivers/usb/gadget/function/uvc_configfs.c struct uvcg_streaming_header *h = priv1; h 2033 drivers/usb/gadget/function/uvc_configfs.c *size += sizeof(h->desc); h 2035 drivers/usb/gadget/function/uvc_configfs.c *size += h->num_fmt * UVCG_STREAMING_CONTROL_SIZE; h 2092 drivers/usb/gadget/function/uvc_configfs.c struct uvcg_streaming_header *h = priv1; h 2095 drivers/usb/gadget/function/uvc_configfs.c memcpy(*dest, &h->desc, sizeof(h->desc)); h 2096 drivers/usb/gadget/function/uvc_configfs.c *dest += sizeof(h->desc); h 2098 drivers/usb/gadget/function/uvc_configfs.c list_for_each_entry(f, &h->formats, entry) { h 2102 drivers/usb/gadget/function/uvc_configfs.c ihdr->bLength = sizeof(h->desc) + h->num_fmt * sz; h 2103 drivers/usb/gadget/function/uvc_configfs.c ihdr->bNumFormats = h->num_fmt; h 2133 drivers/usb/gadget/function/uvc_configfs.c struct uvc_descriptor_header *h = *dest; h 2143 drivers/usb/gadget/function/uvc_configfs.c h->bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE( h 2146 drivers/usb/gadget/function/uvc_configfs.c h->bLength = UVC_DT_FRAME_MJPEG_SIZE( h 29 drivers/usb/host/ehci-atmel.c #define hcd_to_atmel_ehci_priv(h) \ h 30 drivers/usb/host/ehci-atmel.c ((struct atmel_ehci_priv *)hcd_to_ehci(h)->priv) h 26 drivers/usb/host/ehci-mv.c #define hcd_to_ehci_hcd_mv(h) ((struct ehci_hcd_mv *)hcd_to_ehci(h)->priv) h 61 drivers/usb/host/ehci-orion.c #define hcd_to_orion_priv(h) ((struct orion_ehci_hcd *)hcd_to_ehci(h)->priv) h 41 drivers/usb/host/ehci-platform.c #define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv) h 42 drivers/usb/host/ehci-st.c #define hcd_to_ehci_priv(h) \ h 43 drivers/usb/host/ehci-st.c ((struct st_ehci_platform_priv *)hcd_to_ehci(h)->priv) h 347 drivers/usb/host/isp116x.h #define isp116x_delay(h,d) (h)->board->delay( \ h 348 drivers/usb/host/isp116x.h isp116x_to_hcd(h)->self.controller,d) h 349 drivers/usb/host/isp116x.h #define isp116x_check_platform_delay(h) ((h)->board->delay == NULL) h 351 drivers/usb/host/isp116x.h #define isp116x_delay(h,d) ndelay(d) h 352 drivers/usb/host/isp116x.h #define isp116x_check_platform_delay(h) 0 h 354 drivers/usb/host/isp116x.h #define isp116x_delay(h,d) do{}while(0) h 355 drivers/usb/host/isp116x.h #define isp116x_check_platform_delay(h) 0 h 569 drivers/usb/host/isp1362.h #define isp1362_delay(h, d) (h)->board->delay(isp1362_hcd_to_hcd(h)->self.controller, d) h 571 drivers/usb/host/isp1362.h #define isp1362_delay(h, d) ndelay(d) h 573 drivers/usb/host/isp1362.h #define isp1362_delay(h, 
d) do {} while (0) h 38 drivers/usb/host/ohci-at91.c #define hcd_to_ohci_at91_priv(h) \ h 39 drivers/usb/host/ohci-at91.c ((struct ohci_at91_priv *)hcd_to_ohci(h)->priv) h 36 drivers/usb/host/ohci-platform.c #define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv) h 40 drivers/usb/host/ohci-st.c #define hcd_to_ohci_priv(h) \ h 41 drivers/usb/host/ohci-st.c ((struct st_ohci_platform_priv *)hcd_to_ohci(h)->priv) h 21 drivers/usb/host/xhci-plat.h #define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv) h 104 drivers/usb/renesas_usbhs/mod_host.c #define __usbhsh_for_each_udev(start, pos, h, i) \ h 106 drivers/usb/renesas_usbhs/mod_host.c ((i) < USBHSH_DEVICE_MAX) && ((pos) = (h)->udev + (i)); \ h 115 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_hcd_to_hpriv(h) (struct usbhsh_hpriv *)((h)->hcd_priv) h 116 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_hcd_to_dev(h) ((h)->self.controller) h 118 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_hpriv_to_priv(h) ((h)->mod.priv) h 119 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_hpriv_to_dcp(h) ((h)->dcp) h 120 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_hpriv_to_hcd(h) \ h 121 drivers/usb/renesas_usbhs/mod_host.c container_of((void *)h, struct usb_hcd, hcd_priv) h 133 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_udev_to_usbv(h) ((h)->usbv) h 134 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_udev_is_used(h) usbhsh_udev_to_usbv(h) h 140 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_device_number(h, d) ((int)((d) - (h)->udev)) h 141 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_device_nth(h, d) ((h)->udev + d) h 142 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_device0(h) usbhsh_device_nth(h, 0) h 144 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_port_stat_init(h) ((h)->port_stat = 0) h 145 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_port_stat_set(h, s) ((h)->port_stat |= (s)) h 146 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_port_stat_clear(h, s) ((h)->port_stat &= ~(s)) h 147 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_port_stat_get(h) ((h)->port_stat) h 505 drivers/video/console/newport_con.c int h = op->height; h 506 drivers/video/console/newport_con.c int size = h * op->charcount; h 512 drivers/video/console/newport_con.c if ((w != 8) || (h != 16) h 526 drivers/video/console/newport_con.c memcpy(p, data, h); h 528 drivers/video/console/newport_con.c p += h; h 322 drivers/video/fbdev/arcfb.c unsigned int right, unsigned int top, unsigned int h) h 326 drivers/video/fbdev/arcfb.c distance = h; h 344 drivers/video/fbdev/arcfb.c unsigned int dy, unsigned int w, unsigned int h) h 350 drivers/video/fbdev/arcfb.c h += dy - y; h 351 drivers/video/fbdev/arcfb.c h = iceil8(h); h 358 drivers/video/fbdev/arcfb.c arcfb_lcd_update_horiz(par, left, right, y, h); h 450 drivers/video/fbdev/arcfb.c unsigned int fbmemlength,x,y,w,h, bitppos, startpos, endpos, bitcount; h 486 drivers/video/fbdev/arcfb.c h = bitcount / xres; h 487 drivers/video/fbdev/arcfb.c arcfb_lcd_update(par, x, y, w, h); h 375 drivers/video/fbdev/atafb_utils.h static inline void memmove32_col(void *dst, void *src, u32 mask, u32 h, u32 bytes) h 396 drivers/video/fbdev/atafb_utils.h } while (--h); h 72 drivers/video/fbdev/aty/mach64_cursor.c int x, y, h; h 107 drivers/video/fbdev/aty/mach64_cursor.c h = cursor->image.height; h 115 drivers/video/fbdev/aty/mach64_cursor.c h<<=1; h 120 drivers/video/fbdev/aty/mach64_cursor.c ((u32) (64 - h + yoff) << 16) | xoff, par); h 
102 drivers/video/fbdev/aty/radeon_accel.c u32 sx, sy, dx, dy, w, h; h 104 drivers/video/fbdev/aty/radeon_accel.c w = area->width; h = area->height; h 111 drivers/video/fbdev/aty/radeon_accel.c if ( ydir < 0 ) { sy += h-1; dy += h-1; } h 131 drivers/video/fbdev/aty/radeon_accel.c OUTREG(DST_HEIGHT_WIDTH, (h << 16) | w); h 40 drivers/video/fbdev/broadsheetfb.c int h; h 55 drivers/video/fbdev/broadsheetfb.c .h = 600, h 67 drivers/video/fbdev/broadsheetfb.c .h = 240, h 79 drivers/video/fbdev/broadsheetfb.c .h = 825, h 788 drivers/video/fbdev/broadsheetfb.c args[1] = panel_table[par->panel_index].h; h 826 drivers/video/fbdev/broadsheetfb.c panel_table[par->panel_index].h)/2, h 915 drivers/video/fbdev/broadsheetfb.c panel_table[par->panel_index].h)/2, h 935 drivers/video/fbdev/broadsheetfb.c u16 y1 = 0, h = 0; h 951 drivers/video/fbdev/broadsheetfb.c h = h_inc; h 954 drivers/video/fbdev/broadsheetfb.c h += h_inc; h 957 drivers/video/fbdev/broadsheetfb.c broadsheetfb_dpy_update_pages(info->par, y1, y1 + h); h 960 drivers/video/fbdev/broadsheetfb.c h = h_inc; h 966 drivers/video/fbdev/broadsheetfb.c if (h >= yres) { h 971 drivers/video/fbdev/broadsheetfb.c min((u16) (y1 + h), yres)); h 1104 drivers/video/fbdev/broadsheetfb.c dpyh = panel_table[panel_index].h; h 2741 drivers/video/fbdev/cirrusfb.c long h, diff; h 2767 drivers/video/fbdev/cirrusfb.c h = ((14318 * n) / temp) >> s; h 2768 drivers/video/fbdev/cirrusfb.c h = h > freq ? h - freq : freq - h; h 2769 drivers/video/fbdev/cirrusfb.c if (h < diff) { h 2770 drivers/video/fbdev/cirrusfb.c diff = h; h 2782 drivers/video/fbdev/cirrusfb.c h = ((14318 * n) / d) >> s; h 2783 drivers/video/fbdev/cirrusfb.c h = h > freq ? h - freq : freq - h; h 2784 drivers/video/fbdev/cirrusfb.c if (h < diff) { h 2785 drivers/video/fbdev/cirrusfb.c diff = h; h 2565 drivers/video/fbdev/core/fbcon.c static int fbcon_do_set_font(struct vc_data *vc, int w, int h, h 2578 drivers/video/fbdev/core/fbcon.c resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); h 2589 drivers/video/fbdev/core/fbcon.c vc->vc_font.height = h; h 2601 drivers/video/fbdev/core/fbcon.c rows /= h; h 2644 drivers/video/fbdev/core/fbcon.c int h = font->height; h 2664 drivers/video/fbdev/core/fbcon.c size = h * pitch * charcount; h 2676 drivers/video/fbdev/core/fbcon.c memcpy(new_data + i*h*pitch, data + i*32*pitch, h*pitch); h 60 drivers/video/fbdev/core/fbcon_rotate.h int i, j, h = height, w = width; h 66 drivers/video/fbdev/core/fbcon_rotate.h for (i = 0; i < h; i++) { h 78 drivers/video/fbdev/core/fbcon_rotate.h int i, j, h = height, w = width; h 84 drivers/video/fbdev/core/fbcon_rotate.h for (i = 0; i < h; i++) { h 369 drivers/video/fbdev/core/fbmem.c int i, j, h = height - 1; h 373 drivers/video/fbdev/core/fbmem.c out[height * j + h - i] = *in++; h 1528 drivers/video/fbdev/core/fbmem.c struct aperture *h = &hwa->ranges[i]; h 1534 drivers/video/fbdev/core/fbmem.c (unsigned long long)h->base, h 1535 drivers/video/fbdev/core/fbmem.c (unsigned long long)h->size); h 1536 drivers/video/fbdev/core/fbmem.c if (apertures_overlap(g, h)) h 1003 drivers/video/fbdev/fsl-diu-fb.c unsigned int h, w; h 1005 drivers/video/fbdev/fsl-diu-fb.c for (h = 0; h < height; h++) { h 126 drivers/video/fbdev/hpfb.c static void topcat_blit(int x0, int y0, int x1, int y1, int w, int h, int rr) h 142 drivers/video/fbdev/hpfb.c out_be16(fb_regs + WHEIGHT, h); h 73 drivers/video/fbdev/imxfb.c #define LCWHB_CH(h) (((h) & 0x1f) << 16) h 1658 drivers/video/fbdev/intelfb/intelfbhw.c u32 h, u32 color, u32 pitch, u32 bpp, u32 
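radeon_accel.c above flips the copy direction for overlapping screen-to-screen blits, biasing sy/dy by h-1 and stepping backwards whenever the destination lies below the source. A software sketch of the same overlap rule; the real driver also handles the x direction analogously, which memmove() covers here within each row:

    #include <stdint.h>
    #include <string.h>

    /* Overlap-safe 2D copy: when the destination is below the source,
     * copy bottom-up so un-copied source rows are never overwritten. */
    static void blit(uint8_t *fb, int pitch,
                     int sx, int sy, int dx, int dy, int w, int h)
    {
            int ydir = (dy > sy) ? -1 : 1;  /* bottom-up if moving down */
            if (ydir < 0) {
                    sy += h - 1;
                    dy += h - 1;
            }
            for (int i = 0; i < h; i++)
                    memmove(fb + (size_t)(dy + i * ydir) * pitch + dx,
                            fb + (size_t)(sy + i * ydir) * pitch + sx, w);
    }

    int main(void)
    {
            uint8_t fb[8 * 8] = { 0 };
            blit(fb, 8, 0, 0, 0, 2, 8, 4);  /* overlapping move down 2 rows */
            return 0;
    }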
rop) h 1664 drivers/video/fbdev/intelfb/intelfbhw.c "rop 0x%02x\n", x, y, w, h, color, pitch, bpp, rop); h 1670 drivers/video/fbdev/intelfb/intelfbhw.c br14 = (h << HEIGHT_SHIFT) | ((w * (bpp / 8)) << WIDTH_SHIFT); h 1703 drivers/video/fbdev/intelfb/intelfbhw.c u32 dstx, u32 dsty, u32 w, u32 h, u32 pitch, u32 bpp) h 1709 drivers/video/fbdev/intelfb/intelfbhw.c curx, cury, dstx, dsty, w, h, pitch, bpp); h 1719 drivers/video/fbdev/intelfb/intelfbhw.c ((dsty + h) << HEIGHT_SHIFT); h 1748 drivers/video/fbdev/intelfb/intelfbhw.c u32 h, const u8* cdat, u32 x, u32 y, u32 pitch, h 1757 drivers/video/fbdev/intelfb/intelfbhw.c DBG_MSG("intelfbhw_do_drawglyph: (%d,%d) %dx%d\n", x, y, w, h); h 1764 drivers/video/fbdev/intelfb/intelfbhw.c nbytes = nbytes * h; h 1789 drivers/video/fbdev/intelfb/intelfbhw.c br23 = ((x + w) << WIDTH_SHIFT) | ((y + h) << HEIGHT_SHIFT); h 1821 drivers/video/fbdev/intelfb/intelfbhw.c if (ix == iw && iy != (h-1)) { h 587 drivers/video/fbdev/intelfb/intelfbhw.h u32 w, u32 h, u32 color, u32 pitch, u32 bpp, h 590 drivers/video/fbdev/intelfb/intelfbhw.h u32 dstx, u32 dsty, u32 w, u32 h, u32 pitch, h 593 drivers/video/fbdev/intelfb/intelfbhw.h u32 w, u32 h, const u8* cdat, u32 x, u32 y, h 737 drivers/video/fbdev/matrox/matroxfb_maven.c unsigned int h = ht + 2 + x; h 739 drivers/video/fbdev/matrox/matroxfb_maven.c if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) { h 740 drivers/video/fbdev/matrox/matroxfb_maven.c unsigned int diff = h - h2; h 748 drivers/video/fbdev/matrox/matroxfb_maven.c m->htotal = h - 2; h 436 drivers/video/fbdev/matrox/matroxfb_misc.c unsigned char h; h 438 drivers/video/fbdev/matrox/matroxfb_misc.c h = readb(vbios + pcir_offset + 0x12); h 439 drivers/video/fbdev/matrox/matroxfb_misc.c bd->version.vMaj = (h >> 4) & 0xF; h 440 drivers/video/fbdev/matrox/matroxfb_misc.c bd->version.vMin = h & 0xF; h 443 drivers/video/fbdev/matrox/matroxfb_misc.c unsigned char h; h 445 drivers/video/fbdev/matrox/matroxfb_misc.c h = readb(vbios + 5); h 446 drivers/video/fbdev/matrox/matroxfb_misc.c bd->version.vMaj = (h >> 4) & 0xF; h 447 drivers/video/fbdev/matrox/matroxfb_misc.c bd->version.vMin = h & 0xF; h 193 drivers/video/fbdev/nvidia/nv_accel.c int h = y2 - y1 + 1; h 198 drivers/video/fbdev/nvidia/nv_accel.c NVDmaNext(par, (h << 16) | w); h 115 drivers/video/fbdev/nvidia/nvidia.c u16 bg, u16 fg, u32 w, u32 h) h 123 drivers/video/fbdev/nvidia/nvidia.c for (i = 0; i < h; i++) { h 333 drivers/video/fbdev/omap/hwa742.c int h = par->height; h 345 drivers/video/fbdev/omap/hwa742.c x, y, w, h, scr_width, color_mode, flags); h 376 drivers/video/fbdev/omap/hwa742.c enable_tearsync(y, scr_width, h, scr_height, h 381 drivers/video/fbdev/omap/hwa742.c set_window_regs(x, y, x + w, y + h); h 386 drivers/video/fbdev/omap/hwa742.c OMAPFB_CHANNEL_OUT_LCD, offset, scr_width, 0, 0, w, h, h 392 drivers/video/fbdev/omap/hwa742.c hwa742.extif->transfer_area(w, h, request_complete, req); h 192 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c u16 x, u16 y, u16 w, u16 h) h 199 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c u16 y2 = y + h - 1; h 862 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c u16 x, u16 y, u16 w, u16 h) h 868 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); h 996 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c u16 x, u16 y, u16 w, u16 h) h 1005 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c if 
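The fbmem/fbcon rotation entries use out[height * j + h - i] with h = height - 1 to turn a row-major image a quarter turn. A standalone version of exactly that index math, rotating a small test image 90 degrees clockwise:

    #include <stdio.h>

    /* Rotate a width x height byte image 90 degrees clockwise into a
     * height x width buffer, using the same indexing as the entries
     * above: out[height * j + (height - 1) - i] = *in++. */
    static void rotate_cw(const unsigned char *in, unsigned char *out,
                          int width, int height)
    {
            int last = height - 1;
            for (int i = 0; i < height; i++)
                    for (int j = 0; j < width; j++)
                            out[height * j + last - i] = *in++;
    }

    int main(void)
    {
            unsigned char img[2 * 3] = { 1, 2, 3,
                                         4, 5, 6 };
            unsigned char rot[3 * 2];
            rotate_cw(img, rot, 3, 2);
            for (int k = 0; k < 6; k++)
                    printf("%d ", rot[k]);  /* prints: 4 1 5 2 6 3 */
            printf("\n");
            return 0;
    }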
(size < w * h * 3) h 1015 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c size = min(w * h * 3, h 1033 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c dsicm_set_update_window(ddata, x, y, w, h); h 139 drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v) h 143 drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c if (h) h 654 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 h, hv; h 656 drivers/video/fbdev/omap2/omapfb/dss/dispc.c h = FLD_VAL(h_coef[i].hc0_vc00, 7, 0) h 666 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_ovl_write_firh_reg(plane, i, h); h 669 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_ovl_write_firh2_reg(plane, i, h); h 3912 drivers/video/fbdev/omap2/omapfb/dss/dsi.c u16 h = dsi->timings.y_res; h 3914 drivers/video/fbdev/omap2/omapfb/dss/dsi.c DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h); h 3920 drivers/video/fbdev/omap2/omapfb/dss/dsi.c bytespf = bytespl * h; h 279 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c u32 x, u32 y, u32 w, u32 h) h 287 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c if (w == 0 || h == 0) h 292 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c if (x + w > dw || y + h > dh) h 295 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c return display->driver->update(display, x, y, w, h); h 488 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c if (mr->w > 4096 || mr->h > 4096) h 491 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c if (mr->w * mr->h * 3 > mr->buffer_size) h 501 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c mr->x, mr->y, mr->w, mr->h); h 893 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c u32 w, h; h 894 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c display->driver->get_dimensions(display, &w, &h); h 896 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c p.display_info.height = h; h 93 drivers/video/fbdev/omap2/omapfb/omapfb-main.c const short h = var->yres_virtual; h 100 drivers/video/fbdev/omap2/omapfb/omapfb-main.c DBG("fill_fb %dx%d, line_len %d bytes\n", w, h, fbi->fix.line_length); h 102 drivers/video/fbdev/omap2/omapfb/omapfb-main.c for (y = 0; y < h; y++) { h 106 drivers/video/fbdev/omap2/omapfb/omapfb-main.c else if (x < 20 && (y > 20 && y < h - 20)) h 110 drivers/video/fbdev/omap2/omapfb/omapfb-main.c else if (x > w - 20 && (y > 20 && y < h - 20)) h 112 drivers/video/fbdev/omap2/omapfb/omapfb-main.c else if (y > h - 20 && (x > 20 && x < w - 20)) h 115 drivers/video/fbdev/omap2/omapfb/omapfb-main.c y == 20 || y == h - 20) h 117 drivers/video/fbdev/omap2/omapfb/omapfb-main.c else if (x == y || w - x == h - y) h 119 drivers/video/fbdev/omap2/omapfb/omapfb-main.c else if (w - x == y || x == h - y) h 121 drivers/video/fbdev/omap2/omapfb/omapfb-main.c else if (x > 20 && y > 20 && x < w - 20 && y < h - 20) { h 699 drivers/video/fbdev/omap2/omapfb/omapfb-main.c u32 w, h; h 700 drivers/video/fbdev/omap2/omapfb/omapfb-main.c display->driver->get_dimensions(display, &w, &h); h 702 drivers/video/fbdev/omap2/omapfb/omapfb-main.c var->height = DIV_ROUND_CLOSEST(h, 1000); h 1446 drivers/video/fbdev/omap2/omapfb/omapfb-main.c u16 w, h; h 1448 drivers/video/fbdev/omap2/omapfb/omapfb-main.c display->driver->get_resolution(display, &w, &h); h 1451 drivers/video/fbdev/omap2/omapfb/omapfb-main.c size = max(omap_vrfb_min_phys_size(w, h, bytespp), h 1452 drivers/video/fbdev/omap2/omapfb/omapfb-main.c omap_vrfb_min_phys_size(h, w, bytespp)); h 1455 drivers/video/fbdev/omap2/omapfb/omapfb-main.c w * h * bytespp, size); h 1457 
drivers/video/fbdev/omap2/omapfb/omapfb-main.c size = w * h * bytespp; h 1663 drivers/video/fbdev/omap2/omapfb/omapfb-main.c u16 w, h; h 1680 drivers/video/fbdev/omap2/omapfb/omapfb-main.c dssdrv->get_resolution(dssdev, &w, &h); h 1681 drivers/video/fbdev/omap2/omapfb/omapfb-main.c dssdrv->update(dssdev, 0, 0, w, h); h 1753 drivers/video/fbdev/omap2/omapfb/omapfb-main.c u16 w, h; h 1756 drivers/video/fbdev/omap2/omapfb/omapfb-main.c display->driver->get_resolution(display, &w, &h); h 1760 drivers/video/fbdev/omap2/omapfb/omapfb-main.c var->xres = h; h 1764 drivers/video/fbdev/omap2/omapfb/omapfb-main.c var->yres = h; h 2326 drivers/video/fbdev/omap2/omapfb/omapfb-main.c u16 w, h; h 2343 drivers/video/fbdev/omap2/omapfb/omapfb-main.c dssdrv->get_resolution(dssdev, &w, &h); h 2344 drivers/video/fbdev/omap2/omapfb/omapfb-main.c r = dssdrv->update(dssdev, 0, 0, w, h); h 2589 drivers/video/fbdev/omap2/omapfb/omapfb-main.c u16 w, h; h 2591 drivers/video/fbdev/omap2/omapfb/omapfb-main.c def_display->driver->get_resolution(def_display, &w, &h); h 2594 drivers/video/fbdev/omap2/omapfb/omapfb-main.c def_display->name, w, h); h 488 drivers/video/fbdev/riva/fbdev.c u16 bg, u16 fg, u32 w, u32 h) h 498 drivers/video/fbdev/riva/fbdev.c for (i = 0; i < h; i++) { h 118 drivers/video/fbdev/savage/savagefb.h #define BCI_W_H(w, h) (((h) << 16) | ((w) & 0xFFF)) h 146 drivers/video/fbdev/sis/sis_accel.c SiS300SubsequentSolidFillRect(struct sis_video_info *ivideo, int x, int y, int w, int h) h 156 drivers/video/fbdev/sis/sis_accel.c SiS300SetupRect(w,h) h 248 drivers/video/fbdev/sis/sis_accel.c SiS310SubsequentSolidFillRect(struct sis_video_info *ivideo, int x, int y, int w, int h) h 259 drivers/video/fbdev/sis/sis_accel.c SiS310SetupRect(w,h) h 185 drivers/video/fbdev/sis/sis_accel.h #define SiS300SetupRect(w,h) \ h 187 drivers/video/fbdev/sis/sis_accel.h MMIO_OUT32(ivideo->mmio_vbase, BR(6), (h)<<16 | (w) );\ h 315 drivers/video/fbdev/sis/sis_accel.h #define SiS310SetupRect(w,h) \ h 317 drivers/video/fbdev/sis/sis_accel.h MMIO_OUT32(ivideo->mmio_vbase, RECT_WIDTH, (h)<<16 | (w) );\ h 497 drivers/video/fbdev/sis/sis_main.c if(monitor->hmin > sisfb_ddcsmodes[i].h) monitor->hmin = sisfb_ddcsmodes[i].h; h 498 drivers/video/fbdev/sis/sis_main.c if(monitor->hmax < sisfb_ddcsmodes[i].h) monitor->hmax = sisfb_ddcsmodes[i].h + 1; h 519 drivers/video/fbdev/sis/sis_main.c if(monitor->hmin > sisfb_ddcfmodes[j].h) monitor->hmin = sisfb_ddcfmodes[j].h; h 520 drivers/video/fbdev/sis/sis_main.c if(monitor->hmax < sisfb_ddcfmodes[j].h) monitor->hmax = sisfb_ddcfmodes[j].h + 1; h 453 drivers/video/fbdev/sis/sis_main.h u16 h; h 476 drivers/video/fbdev/sis/sis_main.h u16 h; h 73 drivers/video/fbdev/smscufx.c int w, h; h 1027 drivers/video/fbdev/smscufx.c ufx_handle_damage(dev, area->x, area->y, area->w, area->h); h 1123 drivers/video/fbdev/tdfxfb.c int h = 0; h 1131 drivers/video/fbdev/tdfxfb.c fb_writeb(*mask, cursorbase + h); h 1134 drivers/video/fbdev/tdfxfb.c fb_writeb(data, cursorbase + h + 8); h 1136 drivers/video/fbdev/tdfxfb.c h++; h 329 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) h 336 drivers/video/fbdev/tridentfb.c writemmr(par, DST2, point(x + w - 1, y + h - 1)); h 340 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 b) h 342 drivers/video/fbdev/tridentfb.c unsigned size = ((w + 31) >> 5) * h; h 349 drivers/video/fbdev/tridentfb.c writemmr(par, DST2, point(x + w - 1, y + h - 1)); h 355 drivers/video/fbdev/tridentfb.c u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) h 
359 drivers/video/fbdev/tridentfb.c u32 s2 = point(x1 + w - 1, y1 + h - 1); h 361 drivers/video/fbdev/tridentfb.c u32 d2 = point(x2 + w - 1, y2 + h - 1); h 431 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) h 436 drivers/video/fbdev/tridentfb.c writemmr(par, OLDDIM, point(h, w)); h 443 drivers/video/fbdev/tridentfb.c u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) h 459 drivers/video/fbdev/tridentfb.c y1_tmp = y1 + h - 1; h 460 drivers/video/fbdev/tridentfb.c y2_tmp = y2 + h - 1; h 470 drivers/video/fbdev/tridentfb.c writemmr(par, OLDDIM, point(h, w)); h 503 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) h 511 drivers/video/fbdev/tridentfb.c writemmr(par, DST2, point(x + w - 1, y + h - 1)); h 517 drivers/video/fbdev/tridentfb.c u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) h 521 drivers/video/fbdev/tridentfb.c u32 s2 = point(x1 + w - 1, y1 + h - 1); h 523 drivers/video/fbdev/tridentfb.c u32 d2 = point(x2 + w - 1, y2 + h - 1); h 571 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) h 576 drivers/video/fbdev/tridentfb.c writemmr(par, OLDDIM, point(w - 1, h - 1)); h 582 drivers/video/fbdev/tridentfb.c u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) h 598 drivers/video/fbdev/tridentfb.c y1_tmp = y1 + h - 1; h 599 drivers/video/fbdev/tridentfb.c y2_tmp = y2 + h - 1; h 609 drivers/video/fbdev/tridentfb.c writemmr(par, OLDDIM, point(w - 1, h - 1)); h 918 drivers/video/fbdev/udlfb.c dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h); h 318 drivers/video/fbdev/uvesafb.c int i, match = -1, h = 0, d = 0x7fffffff; h 321 drivers/video/fbdev/uvesafb.c h = abs(par->vbe_modes[i].x_res - xres) + h 329 drivers/video/fbdev/uvesafb.c if (h == 0) h 332 drivers/video/fbdev/uvesafb.c if (h < d || (h == d && par->vbe_modes[i].depth > depth)) { h 333 drivers/video/fbdev/uvesafb.c d = h; h 1434 drivers/video/fbdev/uvesafb.c int i, h; h 1451 drivers/video/fbdev/uvesafb.c h = par->vbe_modes[i].bytes_per_scan_line * h 1453 drivers/video/fbdev/uvesafb.c if (h > size_remap) h 1454 drivers/video/fbdev/uvesafb.c size_remap = h; h 386 drivers/video/fbdev/w100fb.c u32 h = area->height, w = area->width; h 405 drivers/video/fbdev/w100fb.c writel((w << 16) | (h & 0xffff), remapped_regs + mmDST_WIDTH_HEIGHT); h 91 drivers/video/fbdev/xen-fbfront.c int x, int y, int w, int h) h 100 drivers/video/fbdev/xen-fbfront.c event.update.height = h; h 141 drivers/video/fbdev/xen-fbfront.c int x1, int y1, int w, int h) h 145 drivers/video/fbdev/xen-fbfront.c int y2 = y1 + h - 1; h 419 drivers/w1/slaves/w1_therm.c int t, h; h 430 drivers/w1/slaves/w1_therm.c h = 1000*((s32)rom[7] - (s32)rom[6]); h 431 drivers/w1/slaves/w1_therm.c h /= (s32)rom[7]; h 432 drivers/w1/slaves/w1_therm.c t += h; h 577 drivers/xen/grant-table.c int h = get_free_entries(count); h 579 drivers/xen/grant-table.c if (h < 0) h 582 drivers/xen/grant-table.c *head = h; h 693 drivers/xen/privcmd.c set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr); h 33 drivers/xen/time.c u32 h, l, h2; h 42 drivers/xen/time.c h = READ_ONCE(p32[1]); h 45 drivers/xen/time.c } while(h2 != h); h 47 drivers/xen/time.c ret = (((u64)h) << 32) | l; h 59 fs/9p/fid.c struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata; h 61 fs/9p/fid.c hlist_for_each_entry(fid, h, dlist) { h 14 fs/adfs/dir_fplus.c struct adfs_bigdirheader *h; h 36 fs/adfs/dir_fplus.c h = (struct adfs_bigdirheader *)dir->bh_fplus[0]->b_data; h 37 fs/adfs/dir_fplus.c size = le32_to_cpu(h->bigdirsize); h 44 fs/adfs/dir_fplus.c if 
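The drivers/xen/time.c entries above read a 64-bit counter that a 32-bit guest can only load in two halves: read the high word, then the low word, then the high word again, and retry until the high word is stable, so a carry between the two loads cannot yield a torn value. A self-contained sketch of that loop:

    #include <stdint.h>

    static uint64_t read_u64_hi_lo(const volatile uint32_t p32[2])
    {
        uint32_t h, l, h2;

        do {
            h  = p32[1];   /* high half first           */
            l  = p32[0];   /* then the low half         */
            h2 = p32[1];   /* high half again           */
        } while (h2 != h); /* retry if a carry raced us */

        return ((uint64_t)h << 32) | l;
    }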
(h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 || h 45 fs/adfs/dir_fplus.c h->bigdirversion[2] != 0 || size & 2047 || h 46 fs/adfs/dir_fplus.c h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME)) { h 90 fs/adfs/dir_fplus.c t->bigdirendmasseq != h->startmasseq || h 96 fs/adfs/dir_fplus.c dir->parent_id = le32_to_cpu(h->bigdirparent); h 119 fs/adfs/dir_fplus.c struct adfs_bigdirheader *h = h 123 fs/adfs/dir_fplus.c if (fpos <= le32_to_cpu(h->bigdirentries)) { h 162 fs/adfs/dir_fplus.c struct adfs_bigdirheader *h = h 168 fs/adfs/dir_fplus.c if (dir->pos >= le32_to_cpu(h->bigdirentries)) h 172 fs/adfs/dir_fplus.c offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3); h 185 fs/adfs/dir_fplus.c offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3); h 186 fs/adfs/dir_fplus.c offset += le32_to_cpu(h->bigdirentries) * sizeof(struct adfs_bigdirentry); h 715 fs/afs/internal.h unsigned long h; /* Hash value for this permit list */ h 126 fs/afs/security.c unsigned long h = permits->nr_permits; h 130 fs/afs/security.c h += (unsigned long)permits->permits[i].key / sizeof(void *); h 131 fs/afs/security.c h += permits->permits[i].access; h 134 fs/afs/security.c permits->h = h; h 254 fs/afs/security.c hash_for_each_possible(afs_permits_cache, xpermits, hash_node, new->h) { h 255 fs/afs/security.c if (xpermits->h != new->h || h 272 fs/afs/security.c hash_add_rcu(afs_permits_cache, &new->hash_node, new->h); h 208 fs/btrfs/btrfs_inode.h u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME); h 211 fs/btrfs/btrfs_inode.h h = (h >> 32) ^ (h & 0xffffffff); h 214 fs/btrfs/btrfs_inode.h return (unsigned long)h; h 219 fs/btrfs/btrfs_inode.h unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root); h 221 fs/btrfs/btrfs_inode.h __insert_inode_hash(inode, h); h 260 fs/btrfs/check-integrity.c static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h); h 262 fs/btrfs/check-integrity.c struct btrfsic_block_hashtable *h); h 267 fs/btrfs/check-integrity.c struct btrfsic_block_hashtable *h); h 269 fs/btrfs/check-integrity.c struct btrfsic_block_link_hashtable *h); h 272 fs/btrfs/check-integrity.c struct btrfsic_block_link_hashtable *h); h 279 fs/btrfs/check-integrity.c struct btrfsic_block_link_hashtable *h); h 281 fs/btrfs/check-integrity.c struct btrfsic_dev_state_hashtable *h); h 284 fs/btrfs/check-integrity.c struct btrfsic_dev_state_hashtable *h); h 287 fs/btrfs/check-integrity.c struct btrfsic_dev_state_hashtable *h); h 488 fs/btrfs/check-integrity.c static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h) h 493 fs/btrfs/check-integrity.c INIT_LIST_HEAD(h->table + i); h 497 fs/btrfs/check-integrity.c struct btrfsic_block_hashtable *h) h 504 fs/btrfs/check-integrity.c list_add(&b->collision_resolving_node, h->table + hashval); h 515 fs/btrfs/check-integrity.c struct btrfsic_block_hashtable *h) h 523 fs/btrfs/check-integrity.c list_for_each_entry(b, h->table + hashval, collision_resolving_node) { h 532 fs/btrfs/check-integrity.c struct btrfsic_block_link_hashtable *h) h 537 fs/btrfs/check-integrity.c INIT_LIST_HEAD(h->table + i); h 542 fs/btrfs/check-integrity.c struct btrfsic_block_link_hashtable *h) h 553 fs/btrfs/check-integrity.c list_add(&l->collision_resolving_node, h->table + hashval); h 566 fs/btrfs/check-integrity.c struct btrfsic_block_link_hashtable *h) h 576 fs/btrfs/check-integrity.c list_for_each_entry(l, h->table + hashval, collision_resolving_node) { h 590 fs/btrfs/check-integrity.c struct btrfsic_dev_state_hashtable *h) h 595 
fs/btrfs/check-integrity.c INIT_LIST_HEAD(h->table + i); h 600 fs/btrfs/check-integrity.c struct btrfsic_dev_state_hashtable *h) h 606 fs/btrfs/check-integrity.c list_add(&ds->collision_resolving_node, h->table + hashval); h 615 fs/btrfs/check-integrity.c struct btrfsic_dev_state_hashtable *h) h 621 fs/btrfs/check-integrity.c list_for_each_entry(ds, h->table + hashval, collision_resolving_node) { h 1714 fs/btrfs/check-integrity.c struct btrfs_header *h; h 1721 fs/btrfs/check-integrity.c h = (struct btrfs_header *)datav[0]; h 1723 fs/btrfs/check-integrity.c if (memcmp(h->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE)) h 1737 fs/btrfs/check-integrity.c if (memcmp(csum, h->csum, state->csum_size)) h 1817 fs/btrfs/ctree.h const struct btrfs_free_space_header *h, h 1820 fs/btrfs/ctree.h read_eb_member(eb, h, struct btrfs_free_space_header, location, key); h 1824 fs/btrfs/ctree.h struct btrfs_free_space_header *h, h 1827 fs/btrfs/ctree.h write_eb_member(eb, h, struct btrfs_free_space_header, location, key); h 28 fs/btrfs/props.c struct hlist_head *h; h 30 fs/btrfs/props.c h = &prop_handlers_ht[hash_min(hash, BTRFS_PROP_HANDLERS_HT_BITS)]; h 31 fs/btrfs/props.c if (hlist_empty(h)) h 34 fs/btrfs/props.c return h; h 41 fs/btrfs/props.c struct prop_handler *h; h 51 fs/btrfs/props.c hlist_for_each_entry(h, handlers, node) h 52 fs/btrfs/props.c if (!strcmp(h->xattr_name, name)) h 53 fs/btrfs/props.c return h; h 338 fs/btrfs/props.c const struct prop_handler *h = &prop_handlers[i]; h 342 fs/btrfs/props.c if (!h->inheritable) h 345 fs/btrfs/props.c value = h->extract(parent); h 353 fs/btrfs/props.c ret = h->validate(value, strlen(value)); h 372 fs/btrfs/props.c ret = btrfs_setxattr(trans, inode, h->xattr_name, value, h 375 fs/btrfs/props.c ret = h->apply(inode, value, strlen(value)); h 377 fs/btrfs/props.c btrfs_setxattr(trans, inode, h->xattr_name, h 444 fs/btrfs/props.c u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name)); h 446 fs/btrfs/props.c hash_add(prop_handlers_ht, &p->node, h); h 206 fs/btrfs/raid56.c struct btrfs_stripe_hash *h; h 221 fs/btrfs/raid56.c table_size = sizeof(*table) + sizeof(*h) * num_entries; h 229 fs/btrfs/raid56.c h = table->table; h 232 fs/btrfs/raid56.c cur = h + i; h 349 fs/btrfs/raid56.c struct btrfs_stripe_hash *h; h 359 fs/btrfs/raid56.c h = table->table + bucket; h 364 fs/btrfs/raid56.c spin_lock(&h->lock); h 396 fs/btrfs/raid56.c spin_unlock(&h->lock); h 675 fs/btrfs/raid56.c struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; h 683 fs/btrfs/raid56.c spin_lock_irqsave(&h->lock, flags); h 684 fs/btrfs/raid56.c list_for_each_entry(cur, &h->hash_list, hash_list) { h 744 fs/btrfs/raid56.c list_add(&rbio->hash_list, &h->hash_list); h 746 fs/btrfs/raid56.c spin_unlock_irqrestore(&h->lock, flags); h 761 fs/btrfs/raid56.c struct btrfs_stripe_hash *h; h 766 fs/btrfs/raid56.c h = rbio->fs_info->stripe_hash_table->table + bucket; h 771 fs/btrfs/raid56.c spin_lock_irqsave(&h->lock, flags); h 805 fs/btrfs/raid56.c list_add(&next->hash_list, &h->hash_list); h 808 fs/btrfs/raid56.c spin_unlock_irqrestore(&h->lock, flags); h 828 fs/btrfs/raid56.c spin_unlock_irqrestore(&h->lock, flags); h 1838 fs/btrfs/scrub.c struct btrfs_header *h; h 1856 fs/btrfs/scrub.c h = (struct btrfs_header *)mapped_buffer; h 1857 fs/btrfs/scrub.c memcpy(on_disk_csum, h->csum, sctx->csum_size); h 1864 fs/btrfs/scrub.c if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h)) h 1867 fs/btrfs/scrub.c if (sblock->pagev[0]->generation != 
btrfs_stack_header_generation(h)) { h 1872 fs/btrfs/scrub.c if (!scrub_check_fsid(h->fsid, sblock->pagev[0])) h 1875 fs/btrfs/scrub.c if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, h 449 fs/btrfs/transaction.c struct btrfs_trans_handle *h; h 464 fs/btrfs/transaction.c h = current->journal_info; h 465 fs/btrfs/transaction.c refcount_inc(&h->use_count); h 466 fs/btrfs/transaction.c WARN_ON(refcount_read(&h->use_count) > 2); h 467 fs/btrfs/transaction.c h->orig_rsv = h->block_rsv; h 468 fs/btrfs/transaction.c h->block_rsv = NULL; h 529 fs/btrfs/transaction.c h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS); h 530 fs/btrfs/transaction.c if (!h) { h 566 fs/btrfs/transaction.c h->transid = cur_trans->transid; h 567 fs/btrfs/transaction.c h->transaction = cur_trans; h 568 fs/btrfs/transaction.c h->root = root; h 569 fs/btrfs/transaction.c refcount_set(&h->use_count, 1); h 570 fs/btrfs/transaction.c h->fs_info = root->fs_info; h 572 fs/btrfs/transaction.c h->type = type; h 573 fs/btrfs/transaction.c h->can_flush_pending_bgs = true; h 574 fs/btrfs/transaction.c INIT_LIST_HEAD(&h->new_bgs); h 579 fs/btrfs/transaction.c current->journal_info = h; h 580 fs/btrfs/transaction.c btrfs_commit_transaction(h); h 586 fs/btrfs/transaction.c h->transid, num_bytes, 1); h 587 fs/btrfs/transaction.c h->block_rsv = &fs_info->trans_block_rsv; h 588 fs/btrfs/transaction.c h->bytes_reserved = num_bytes; h 589 fs/btrfs/transaction.c h->reloc_reserved = reloc_reserved; h 594 fs/btrfs/transaction.c current->journal_info = h; h 604 fs/btrfs/transaction.c btrfs_record_root_in_trans(h, root); h 606 fs/btrfs/transaction.c return h; h 611 fs/btrfs/transaction.c kmem_cache_free(btrfs_trans_handle_cachep, h); h 3808 fs/ceph/caps.c struct ceph_mds_caps *h; h 3824 fs/ceph/caps.c if (msg->front.iov_len < sizeof(*h)) h 3826 fs/ceph/caps.c h = msg->front.iov_base; h 3827 fs/ceph/caps.c op = le32_to_cpu(h->op); h 3828 fs/ceph/caps.c vino.ino = le64_to_cpu(h->ino); h 3830 fs/ceph/caps.c seq = le32_to_cpu(h->seq); h 3831 fs/ceph/caps.c mseq = le32_to_cpu(h->migrate_seq); h 3833 fs/ceph/caps.c snaptrace = h + 1; h 3834 fs/ceph/caps.c snaptrace_len = le32_to_cpu(h->snap_trace_len); h 3853 fs/ceph/caps.c peer = (void *)&h->size; h 3933 fs/ceph/caps.c cap->cap_id = le64_to_cpu(h->cap_id); h 3948 fs/ceph/caps.c h, session); h 3952 fs/ceph/caps.c handle_cap_export(inode, h, peer, session); h 3966 fs/ceph/caps.c handle_cap_import(mdsc, inode, h, peer, session, h 3969 fs/ceph/caps.c h, msg->middle, &extra_info); h 3993 fs/ceph/caps.c h, msg->middle, &extra_info); h 3998 fs/ceph/caps.c h, session, cap); h 4002 fs/ceph/caps.c handle_cap_trunc(inode, h, session); h 1033 fs/ceph/mds_client.c struct ceph_mds_session_head *h; h 1035 fs/ceph/mds_client.c msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, h 1041 fs/ceph/mds_client.c h = msg->front.iov_base; h 1042 fs/ceph/mds_client.c h->op = cpu_to_le32(op); h 1043 fs/ceph/mds_client.c h->seq = cpu_to_le64(seq); h 1076 fs/ceph/mds_client.c struct ceph_mds_session_head *h; h 1103 fs/ceph/mds_client.c msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes, h 1112 fs/ceph/mds_client.c h = p; h 1113 fs/ceph/mds_client.c h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN); h 1114 fs/ceph/mds_client.c h->seq = cpu_to_le64(seq); h 1126 fs/ceph/mds_client.c p += sizeof(*h); h 3073 fs/ceph/mds_client.c struct ceph_mds_session_head *h; h 3080 fs/ceph/mds_client.c ceph_decode_need(&p, end, sizeof(*h), bad); h 3081 fs/ceph/mds_client.c h = p; h 3082 
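The ceph cap-message handler above first checks that the front of the message is at least sizeof(*h), then treats the buffer as a little-endian wire header and converts each field with le32_to_cpu/le64_to_cpu. A userspace sketch of the same decode step; the struct layout is illustrative, not the actual ceph_mds_caps format, and le32toh/le64toh are the glibc conversions from <endian.h>:

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    struct wire_hdr {          /* illustrative layout */
        uint32_t op;
        uint64_t ino;
        uint32_t seq;
    } __attribute__((packed));

    /* Returns 0 on success, -1 if the buffer is too short. */
    static int decode_hdr(const void *buf, size_t len,
                          uint32_t *op, uint64_t *ino, uint32_t *seq)
    {
        struct wire_hdr h;

        if (len < sizeof(h))        /* same guard as the cap handler */
            return -1;
        memcpy(&h, buf, sizeof(h)); /* avoid unaligned loads */
        *op  = le32toh(h.op);
        *ino = le64toh(h.ino);
        *seq = le32toh(h.seq);
        return 0;
    }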
fs/ceph/mds_client.c p += sizeof(*h); h 3084 fs/ceph/mds_client.c op = le32_to_cpu(h->op); h 3085 fs/ceph/mds_client.c seq = le64_to_cpu(h->seq); h 3155 fs/ceph/mds_client.c ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); h 3871 fs/ceph/mds_client.c struct ceph_mds_lease *h = msg->front.iov_base; h 3880 fs/ceph/mds_client.c if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) h 3882 fs/ceph/mds_client.c vino.ino = le64_to_cpu(h->ino); h 3884 fs/ceph/mds_client.c seq = le32_to_cpu(h->seq); h 3885 fs/ceph/mds_client.c dname.len = get_unaligned_le32(h + 1); h 3886 fs/ceph/mds_client.c if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len) h 3888 fs/ceph/mds_client.c dname.name = (void *)(h + 1) + sizeof(u32); h 3893 fs/ceph/mds_client.c ceph_lease_op_name(h->action), vino.ino, inode, h 3919 fs/ceph/mds_client.c switch (h->action) { h 3923 fs/ceph/mds_client.c h->seq = cpu_to_le32(di->lease_seq); h 3935 fs/ceph/mds_client.c msecs_to_jiffies(le32_to_cpu(h->duration_ms)); h 3953 fs/ceph/mds_client.c h->action = CEPH_MDS_LEASE_REVOKE_ACK; h 42 fs/ceph/quota.c struct ceph_mds_quota *h = msg->front.iov_base; h 47 fs/ceph/quota.c if (msg->front.iov_len < sizeof(*h)) { h 60 fs/ceph/quota.c vino.ino = le64_to_cpu(h->ino); h 70 fs/ceph/quota.c ci->i_rbytes = le64_to_cpu(h->rbytes); h 71 fs/ceph/quota.c ci->i_rfiles = le64_to_cpu(h->rfiles); h 72 fs/ceph/quota.c ci->i_rsubdirs = le64_to_cpu(h->rsubdirs); h 73 fs/ceph/quota.c __ceph_update_quota(ci, le64_to_cpu(h->max_bytes), h 74 fs/ceph/quota.c le64_to_cpu(h->max_files)); h 854 fs/ceph/snap.c struct ceph_mds_snap_head *h; h 861 fs/ceph/snap.c if (msg->front.iov_len < sizeof(*h)) h 863 fs/ceph/snap.c h = p; h 864 fs/ceph/snap.c op = le32_to_cpu(h->op); h 865 fs/ceph/snap.c split = le64_to_cpu(h->split); /* non-zero if we are splitting an h 867 fs/ceph/snap.c num_split_inos = le32_to_cpu(h->num_split_inos); h 868 fs/ceph/snap.c num_split_realms = le32_to_cpu(h->num_split_realms); h 869 fs/ceph/snap.c trace_len = le32_to_cpu(h->trace_len); h 870 fs/ceph/snap.c p += sizeof(*h); h 589 fs/cifs/cifsproto.h void extract_unc_hostname(const char *unc, const char **h, size_t *len); h 310 fs/cifs/dfs_cache.c unsigned int h; h 312 fs/cifs/dfs_cache.c h = jhash(data, size, 0); h 313 fs/cifs/dfs_cache.c return h & (DFS_CACHE_HTABLE_SIZE - 1); h 568 fs/cifs/dfs_cache.c unsigned int h; h 572 fs/cifs/dfs_cache.c ce = find_cache_entry(path, &h); h 645 fs/cifs/dfs_cache.c unsigned int h; h 652 fs/cifs/dfs_cache.c ce = find_cache_entry(path, &h); h 698 fs/cifs/dfs_cache.c ce = add_cache_entry(h, path, nrefs, numnrefs); h 1051 fs/cifs/dfs_cache.c unsigned int h; h 1066 fs/cifs/dfs_cache.c ce = find_cache_entry(npath, &h); h 1382 fs/cifs/dfs_cache.c unsigned int h; h 1397 fs/cifs/dfs_cache.c ce = find_cache_entry(npath, &h); h 998 fs/cifs/misc.c void extract_unc_hostname(const char *unc, const char **h, size_t *len) h 1011 fs/cifs/misc.c *h = unc; h 20 fs/dlm/member.c int dlm_slots_version(struct dlm_header *h) h 22 fs/dlm/member.c if ((h->h_version & 0x0000FFFF) < DLM_HEADER_SLOTS) h 21 fs/dlm/member.h int dlm_slots_version(struct dlm_header *h); h 539 fs/erofs/xattr.c const struct xattr_handler *h = h 542 fs/erofs/xattr.c if (!h || (h->list && !h->list(it->dentry))) h 545 fs/erofs/xattr.c prefix = xattr_prefix(h); h 37 fs/erofs/zmap.c struct z_erofs_map_header *h; h 61 fs/erofs/zmap.c h = kaddr + erofs_blkoff(pos); h 62 fs/erofs/zmap.c vi->z_advise = le16_to_cpu(h->h_advise); h 63 fs/erofs/zmap.c vi->z_algorithmtype[0] = h->h_algorithmtype & 15; h 64 
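The ceph lease handler above shows the standard two-step bounds check for a variable-length name that follows a fixed header: first ensure the u32 length word itself is present (iov_len >= sizeof(*h) + sizeof(u32)), then re-check once the length is known. A sketch of that parse; the layout is illustrative:

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    /* Returns 0 and sets name/name_len, or -1 if the buffer cannot hold
     * header + length word + name bytes. */
    static int parse_name(const uint8_t *buf, size_t len, size_t hdr_size,
                          const char **name, uint32_t *name_len)
    {
        uint32_t n;

        if (len < hdr_size + sizeof(uint32_t))
            return -1;                      /* length word missing */
        memcpy(&n, buf + hdr_size, sizeof(n));
        n = le32toh(n);
        if (len < hdr_size + sizeof(uint32_t) + n)
            return -1;                      /* name truncated */
        *name = (const char *)(buf + hdr_size + sizeof(uint32_t));
        *name_len = n;
        return 0;
    }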
fs/erofs/zmap.c vi->z_algorithmtype[1] = h->h_algorithmtype >> 4; h 73 fs/erofs/zmap.c vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7); h 75 fs/erofs/zmap.c ((h->h_clusterbits >> 3) & 3); h 85 fs/erofs/zmap.c ((h->h_clusterbits >> 5) & 7); h 621 fs/ext4/namei.c struct dx_hash_info h = *hinfo; h 648 fs/ext4/namei.c de->name_len, &h); h 650 fs/ext4/namei.c name, h.hash, h 681 fs/ext4/namei.c de->name_len, &h); h 683 fs/ext4/namei.c h.hash, (unsigned) ((char *) de h 691 fs/ext4/namei.c ext4fs_dirhash(dir, de->name, de->name_len, &h); h 692 fs/ext4/namei.c printk("%*.s:%x.%u ", len, name, h.hash, h 1214 fs/ext4/namei.c struct dx_hash_info h = *hinfo; h 1218 fs/ext4/namei.c ext4fs_dirhash(dir, de->name, de->name_len, &h); h 1220 fs/ext4/namei.c map_tail->hash = h.hash; h 3601 fs/ext4/namei.c int credits, handle_t **h) h 3626 fs/ext4/namei.c *h = handle; h 282 fs/fcntl.c u64 h; h 286 fs/fcntl.c h = file_write_hint(file); h 287 fs/fcntl.c if (copy_to_user(argp, &h, sizeof(*argp))) h 291 fs/fcntl.c if (copy_from_user(&h, argp, sizeof(h))) h 293 fs/fcntl.c hint = (enum rw_hint) h; h 302 fs/fcntl.c h = inode->i_write_hint; h 303 fs/fcntl.c if (copy_to_user(argp, &h, sizeof(*argp))) h 307 fs/fcntl.c if (copy_from_user(&h, argp, sizeof(h))) h 309 fs/fcntl.c hint = (enum rw_hint) h; h 77 fs/fscache/cookie.c unsigned long long h; h 98 fs/fscache/cookie.c h = (unsigned long)cookie->parent; h 99 fs/fscache/cookie.c h += index_key_len + cookie->type; h 102 fs/fscache/cookie.c h += buf[i]; h 104 fs/fscache/cookie.c cookie->key_hash = h ^ (h >> 32); h 198 fs/fscache/cookie.c struct hlist_bl_head *h; h 203 fs/fscache/cookie.c h = &fscache_cookie_hash[bucket]; h 205 fs/fscache/cookie.c hlist_bl_lock(h); h 206 fs/fscache/cookie.c hlist_bl_for_each_entry(cursor, p, h, hash_link) { h 214 fs/fscache/cookie.c hlist_bl_add_head(&candidate->hash_link, h); h 215 fs/fscache/cookie.c hlist_bl_unlock(h); h 225 fs/fscache/cookie.c hlist_bl_unlock(h); h 230 fs/fscache/cookie.c hlist_bl_unlock(h); h 842 fs/fscache/cookie.c struct hlist_bl_head *h; h 846 fs/fscache/cookie.c h = &fscache_cookie_hash[bucket]; h 848 fs/fscache/cookie.c hlist_bl_lock(h); h 850 fs/fscache/cookie.c hlist_bl_unlock(h); h 136 fs/fuse/dev.c req->in.h.uid = from_kuid(fc->user_ns, current_fsuid()); h 137 fs/fuse/dev.c req->in.h.gid = from_kgid(fc->user_ns, current_fsgid()); h 138 fs/fuse/dev.c req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); h 144 fs/fuse/dev.c if (unlikely(req->in.h.uid == ((uid_t)-1) || h 145 fs/fuse/dev.c req->in.h.gid == ((gid_t)-1))) { h 225 fs/fuse/dev.c req->in.h.len = sizeof(struct fuse_in_header) + h 263 fs/fuse/dev.c req->in.h.unique = fuse_get_unique(fiq); h 326 fs/fuse/dev.c req->args->end(fc, req->args, req->out.h.error); h 392 fs/fuse/dev.c req->out.h.error = -EINTR; h 413 fs/fuse/dev.c req->out.h.error = -ENOTCONN; h 415 fs/fuse/dev.c req->in.h.unique = fuse_get_unique(fiq); h 462 fs/fuse/dev.c req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid()); h 463 fs/fuse/dev.c req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid()); h 464 fs/fuse/dev.c req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); h 469 fs/fuse/dev.c req->in.h.opcode = args->opcode; h 470 fs/fuse/dev.c req->in.h.nodeid = args->nodeid; h 504 fs/fuse/dev.c ret = req->out.h.error; h 584 fs/fuse/dev.c req->in.h.unique = unique; h 1036 fs/fuse/dev.c ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT); h 1037 fs/fuse/dev.c arg.unique = req->in.h.unique; h 1246 fs/fuse/dev.c reqsize = req->in.h.len; h 1250 
fs/fuse/dev.c req->out.h.error = -EIO; h 1253 fs/fuse/dev.c req->out.h.error = -E2BIG; h 1261 fs/fuse/dev.c err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h)); h 1273 fs/fuse/dev.c req->out.h.error = -EIO; h 1280 fs/fuse/dev.c hash = fuse_req_hash(req->in.h.unique); h 1789 fs/fuse/dev.c if (req->in.h.unique == unique) h 1888 fs/fuse/dev.c req->out.h = oh; h 1906 fs/fuse/dev.c req->out.h.error = -EIO; h 2048 fs/fuse/dev.c req->out.h.error = -ECONNABORTED; h 2111 fs/fuse/dev.c req->out.h.error = -ECONNABORTED; h 347 fs/fuse/fuse_i.h struct fuse_in_header h; h 352 fs/fuse/fuse_i.h struct fuse_out_header h; h 305 fs/fuse/virtio_fs.c req->out.h.error = ret; h 416 fs/fuse/virtio_fs.c remaining = req->out.h.len - sizeof(req->out.h); h 911 fs/fuse/virtio_fs.c sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h)); h 920 fs/fuse/virtio_fs.c &req->out.h, sizeof(req->out.h)); h 997 fs/fuse/virtio_fs.c __func__, req->in.h.opcode, req->in.h.unique, h 998 fs/fuse/virtio_fs.c req->in.h.nodeid, req->in.h.len, h 1017 fs/fuse/virtio_fs.c req->out.h.error = ret; h 325 fs/gfs2/bmap.c unsigned int x, unsigned int h) h 327 fs/gfs2/bmap.c for (; x < h; x++) { h 375 fs/gfs2/bmap.c static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h) h 380 fs/gfs2/bmap.c if (h) { h 382 fs/gfs2/bmap.c for (x = h - 1; x > 0; x--) { h 387 fs/gfs2/bmap.c ret = __fillup_metapath(ip, mp, x, h); h 1647 fs/gfs2/bmap.c static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h) h 1649 fs/gfs2/bmap.c if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0]))) h 1664 fs/gfs2/bmap.c unsigned int h, h 1667 fs/gfs2/bmap.c struct buffer_head *bh = mp->mp_bh[h]; h 1670 fs/gfs2/bmap.c first = metaptr1(h, mp); h 1671 fs/gfs2/bmap.c ptr = first + mp->mp_list[h]; h 1673 fs/gfs2/bmap.c if (end_list && mp_eq_to_hgt(mp, end_list, h)) { h 1674 fs/gfs2/bmap.c bool keep_end = h < end_aligned; h 1675 fs/gfs2/bmap.c end = first + end_list[h] + keep_end; h 1680 fs/gfs2/bmap.c mp->mp_list[h] = ptr - first; h 1681 fs/gfs2/bmap.c h++; h 1682 fs/gfs2/bmap.c if (h < GFS2_MAX_META_HEIGHT) h 1683 fs/gfs2/bmap.c mp->mp_list[h] = 0; h 81 fs/gfs2/dir.c #define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1) h 536 fs/gfs2/dir.c const struct gfs2_meta_header *h = buf; h 541 fs/gfs2/dir.c switch(be32_to_cpu(h->mh_type)) { h 554 fs/gfs2/dir.c be32_to_cpu(h->mh_type)); h 1159 fs/gfs2/dir.c __be64 *hc2, *h; h 1177 fs/gfs2/dir.c h = hc2; h 1183 fs/gfs2/dir.c *h++ = *hc; h 1184 fs/gfs2/dir.c *h++ = *hc; h 88 fs/gfs2/quota.c unsigned int h; h 90 fs/gfs2/quota.c h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0); h 91 fs/gfs2/quota.c h = jhash(&qid, sizeof(struct kqid), h); h 93 fs/gfs2/quota.c return h & GFS2_QD_HASH_MASK; h 245 fs/gfs2/quota.c struct hlist_bl_node *h; h 247 fs/gfs2/quota.c hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) { h 94 fs/hfs/hfs.h __be16 h; h 224 fs/hfsplus/hfsplus_raw.h __be16 h; h 247 fs/hpfs/dnode.c int h; h 303 fs/hpfs/dnode.c h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; h 314 fs/hpfs/dnode.c for (de = dnode_first_de(nd); (char *)de_next_de(de) - (char *)nd < h; de = de_next_de(de)) { h 198 fs/hpfs/ea.c unsigned char h[4]; h 351 fs/hpfs/ea.c h[0] = 0; h 352 fs/hpfs/ea.c h[1] = strlen(key); h 353 fs/hpfs/ea.c h[2] = size & 0xff; h 354 fs/hpfs/ea.c h[3] = size >> 8; h 355 fs/hpfs/ea.c if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail; h 356 fs/hpfs/ea.c if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), 
fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail; h 357 fs/hpfs/ea.c if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail; h 140 fs/hugetlbfs/inode.c struct hstate *h = hstate_file(file); h 165 fs/hugetlbfs/inode.c if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) h 179 fs/hugetlbfs/inode.c vma->vm_pgoff >> huge_page_order(h), h 180 fs/hugetlbfs/inode.c len >> huge_page_shift(h), vma, h 204 fs/hugetlbfs/inode.c struct hstate *h = hstate_file(file); h 207 fs/hugetlbfs/inode.c if (len & ~huge_page_mask(h)) h 219 fs/hugetlbfs/inode.c addr = ALIGN(addr, huge_page_size(h)); h 230 fs/hugetlbfs/inode.c info.align_mask = PAGE_MASK & ~huge_page_mask(h); h 273 fs/hugetlbfs/inode.c struct hstate *h = hstate_file(file); h 276 fs/hugetlbfs/inode.c unsigned long index = iocb->ki_pos >> huge_page_shift(h); h 277 fs/hugetlbfs/inode.c unsigned long offset = iocb->ki_pos & ~huge_page_mask(h); h 287 fs/hugetlbfs/inode.c nr = huge_page_size(h); h 291 fs/hugetlbfs/inode.c end_index = (isize - 1) >> huge_page_shift(h); h 295 fs/hugetlbfs/inode.c nr = ((isize - 1) & ~huge_page_mask(h)) + 1; h 325 fs/hugetlbfs/inode.c index += offset >> huge_page_shift(h); h 326 fs/hugetlbfs/inode.c offset &= ~huge_page_mask(h); h 328 fs/hugetlbfs/inode.c iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset; h 417 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(inode); h 419 fs/hugetlbfs/inode.c const pgoff_t start = lstart >> huge_page_shift(h); h 420 fs/hugetlbfs/inode.c const pgoff_t end = lend >> huge_page_shift(h); h 443 fs/hugetlbfs/inode.c hash = hugetlb_fault_mutex_hash(h, mapping, index, 0); h 460 fs/hugetlbfs/inode.c index * pages_per_huge_page(h), h 461 fs/hugetlbfs/inode.c (index + 1) * pages_per_huge_page(h)); h 518 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(inode); h 520 fs/hugetlbfs/inode.c BUG_ON(offset & ~huge_page_mask(h)); h 534 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(inode); h 535 fs/hugetlbfs/inode.c loff_t hpage_size = huge_page_size(h); h 576 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(inode); h 579 fs/hugetlbfs/inode.c loff_t hpage_size = huge_page_size(h); h 580 fs/hugetlbfs/inode.c unsigned long hpage_shift = huge_page_shift(h); h 647 fs/hugetlbfs/inode.c hash = hugetlb_fault_mutex_hash(h, mapping, index, addr); h 667 fs/hugetlbfs/inode.c clear_huge_page(page, addr, pages_per_huge_page(h)); h 697 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(inode); h 712 fs/hugetlbfs/inode.c if (newsize & ~huge_page_mask(h)) h 964 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(d_inode(dentry)); h 967 fs/hugetlbfs/inode.c buf->f_bsize = huge_page_size(h); h 1127 fs/hugetlbfs/inode.c hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, h 1134 fs/hugetlbfs/inode.c size_opt <<= huge_page_shift(h); h 1135 fs/hugetlbfs/inode.c size_opt *= h->max_huge_pages; h 1139 fs/hugetlbfs/inode.c size_opt >>= huge_page_shift(h); h 1356 fs/hugetlbfs/inode.c struct hstate *h = hstate_sizelog(page_size_log); h 1358 fs/hugetlbfs/inode.c if (!h) h 1360 fs/hugetlbfs/inode.c return h - hstates; h 1427 fs/hugetlbfs/inode.c static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) h 1437 fs/hugetlbfs/inode.c ctx->hstate = h; h 1443 fs/hugetlbfs/inode.c 1U << (h->order + PAGE_SHIFT - 10)); h 1450 fs/hugetlbfs/inode.c struct hstate *h; h 1480 fs/hugetlbfs/inode.c for_each_hstate(h) { h 1486 fs/hugetlbfs/inode.c mnt = 
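The hugetlbfs entries above split a file position into a huge-page index and a byte offset within the page with one shift and one mask, where huge_page_mask follows the PAGE_MASK convention of masking off the in-page bits (~(page_size - 1)). A sketch of that split:

    #include <stdint.h>

    static void split_pos(uint64_t pos, unsigned page_shift,
                          uint64_t *index, uint64_t *offset)
    {
        uint64_t page_mask = ~((1ULL << page_shift) - 1); /* PAGE_MASK style */

        *index  = pos >> page_shift;   /* which huge page      */
        *offset = pos & ~page_mask;    /* byte within the page */
    }

    /* e.g. with a 2 MiB page (shift 21), pos 0x200123 gives
     * index 1, offset 0x123. */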
mount_one_hugetlbfs(h); h 94 fs/jbd2/commit.c struct commit_header *h; h 100 fs/jbd2/commit.c h = (struct commit_header *)(bh->b_data); h 101 fs/jbd2/commit.c h->h_chksum_type = 0; h 102 fs/jbd2/commit.c h->h_chksum_size = 0; h 103 fs/jbd2/commit.c h->h_chksum[0] = 0; h 105 fs/jbd2/commit.c h->h_chksum[0] = cpu_to_be32(csum); h 380 fs/jbd2/recovery.c struct commit_header *h; h 387 fs/jbd2/recovery.c h = buf; h 388 fs/jbd2/recovery.c provided = h->h_chksum[0]; h 389 fs/jbd2/recovery.c h->h_chksum[0] = 0; h 391 fs/jbd2/recovery.c h->h_chksum[0] = provided; h 3896 fs/jfs/jfs_dtree.c struct dtslot *h, *t; h 3915 fs/jfs/jfs_dtree.c h = &p->slot[fsi]; h 3916 fs/jfs/jfs_dtree.c p->header.freelist = h->next; h 3928 fs/jfs/jfs_dtree.c lh = (struct ldtentry *) h; h 3929 fs/jfs/jfs_dtree.c lh->next = h->next; h 3943 fs/jfs/jfs_dtree.c ih = (struct idtentry *) h; h 3944 fs/jfs/jfs_dtree.c ih->next = h->next; h 3958 fs/jfs/jfs_dtree.c t = h; h 4001 fs/jfs/jfs_dtree.c if (h == t) { h 4062 fs/jfs/jfs_dtree.c struct dtslot *h, *s, *d; h 4113 fs/jfs/jfs_dtree.c h = d = &dp->slot[dsi]; h 4120 fs/jfs/jfs_dtree.c dlh = (struct ldtentry *) h; h 4141 fs/jfs/jfs_dtree.c dih = (struct idtentry *) h; h 4209 fs/jfs/jfs_dtree.c if (h == d) { h 573 fs/jfs/jfs_logmgr.c lspn = le32_to_cpu(lp->h.page); h 623 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); h 644 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(lspn + 1); h 645 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); h 784 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); h 792 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); h 876 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); h 1349 fs/jfs/jfs_logmgr.c le16_to_cpu(lp->h.eor)); h 1378 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); h 1665 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); h 2441 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(npages - 3); h 2442 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE); h 2461 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(lspn); h 2462 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); h 114 fs/jfs/jfs_logmgr.h } h; h 62 fs/nfs/dns_resolve.c struct cache_head h; h 79 fs/nfs/dns_resolve.c new = container_of(cnew, struct nfs_dns_ent, h); h 80 fs/nfs/dns_resolve.c key = container_of(ckey, struct nfs_dns_ent, h); h 92 fs/nfs/dns_resolve.c new = container_of(cnew, struct nfs_dns_ent, h); h 93 fs/nfs/dns_resolve.c key = container_of(ckey, struct nfs_dns_ent, h); h 119 fs/nfs/dns_resolve.c item = container_of(ref, struct nfs_dns_ent, h.ref); h 131 fs/nfs/dns_resolve.c return &item->h; h 145 fs/nfs/dns_resolve.c struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h); h 154 fs/nfs/dns_resolve.c struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h); h 169 fs/nfs/dns_resolve.c a = container_of(ca, struct nfs_dns_ent, h); h 170 fs/nfs/dns_resolve.c b = container_of(cb, struct nfs_dns_ent, h); h 178 fs/nfs/dns_resolve.c struct cache_head *h) h 183 fs/nfs/dns_resolve.c if (h == NULL) { h 187 fs/nfs/dns_resolve.c item = container_of(h, struct nfs_dns_ent, h); h 188 fs/nfs/dns_resolve.c ttl = item->h.expiry_time - seconds_since_boot(); h 192 fs/nfs/dns_resolve.c if (!test_bit(CACHE_NEGATIVE, &h->flags)) { h 209 fs/nfs/dns_resolve.c &key->h, h 213 fs/nfs/dns_resolve.c return container_of(ch, struct nfs_dns_ent, h); h 223 fs/nfs/dns_resolve.c 
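The jbd2 commit and recovery entries above show the standard trick for checksumming a block that contains its own checksum field: zero the field, checksum the whole buffer, then either store the result (commit) or save the provided value, recompute, and restore it (recovery). A sketch, with crc32() standing in as an assumed helper rather than any real journal API:

    #include <stddef.h>
    #include <stdint.h>

    extern uint32_t crc32(const void *buf, size_t len); /* assumed helper */

    struct blk_hdr {
        uint32_t magic;
        uint32_t chksum;   /* covers the whole block, with this field 0 */
    };

    static void seal(struct blk_hdr *h, size_t blk_len)
    {
        h->chksum = 0;                        /* exclude field from the sum */
        h->chksum = crc32(h, blk_len);
    }

    static int verify(struct blk_hdr *h, size_t blk_len)
    {
        uint32_t provided = h->chksum;
        uint32_t computed;

        h->chksum = 0;
        computed = crc32(h, blk_len);
        h->chksum = provided;                 /* restore, as recovery does */
        return computed == provided;
    }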
&new->h, &key->h, h 227 fs/nfs/dns_resolve.c return container_of(ch, struct nfs_dns_ent, h); h 255 fs/nfs/dns_resolve.c memset(&key.h, 0, sizeof(key.h)); h 261 fs/nfs/dns_resolve.c key.h.expiry_time = ttl + seconds_since_boot(); h 269 fs/nfs/dns_resolve.c set_bit(CACHE_NEGATIVE, &key.h.flags); h 276 fs/nfs/dns_resolve.c cache_put(&item->h, cd); h 290 fs/nfs/dns_resolve.c ret = cache_check(cd, &(*item)->h, &dreq->req); h 307 fs/nfs/dns_resolve.c if (!test_bit(CACHE_VALID, &(*item)->h.flags) h 308 fs/nfs/dns_resolve.c || (*item)->h.expiry_time < seconds_since_boot() h 309 fs/nfs/dns_resolve.c || cd->flush_time > (*item)->h.last_refresh) h 312 fs/nfs/dns_resolve.c if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags)) h 316 fs/nfs/dns_resolve.c cache_put(&(*item)->h, cd); h 361 fs/nfs/dns_resolve.c cache_put(&item->h, nn->nfs_dns_resolve); h 353 fs/nfs/pnfs_dev.c long h; h 357 fs/nfs/pnfs_dev.c for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++) h 358 fs/nfs/pnfs_dev.c _deviceid_purge_client(clp, h); h 44 fs/nfsd/export.c struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); h 46 fs/nfsd/export.c if (test_bit(CACHE_VALID, &key->h.flags) && h 47 fs/nfsd/export.c !test_bit(CACHE_NEGATIVE, &key->h.flags)) h 54 fs/nfsd/export.c struct cache_head *h, h 58 fs/nfsd/export.c struct svc_expkey *ek = container_of(h, struct svc_expkey, h); h 119 fs/nfsd/export.c key.h.flags = 0; h 120 fs/nfsd/export.c key.h.expiry_time = get_expiry(&mesg); h 121 fs/nfsd/export.c if (key.h.expiry_time == 0) h 141 fs/nfsd/export.c set_bit(CACHE_NEGATIVE, &key.h.flags); h 160 fs/nfsd/export.c cache_put(&ek->h, cd); h 169 fs/nfsd/export.c struct cache_head *h) h 174 fs/nfsd/export.c if (h ==NULL) { h 178 fs/nfsd/export.c ek = container_of(h, struct svc_expkey, h); h 183 fs/nfsd/export.c if (test_bit(CACHE_VALID, &h->flags) && h 184 fs/nfsd/export.c !test_bit(CACHE_NEGATIVE, &h->flags)) { h 194 fs/nfsd/export.c struct svc_expkey *orig = container_of(a, struct svc_expkey, h); h 195 fs/nfsd/export.c struct svc_expkey *new = container_of(b, struct svc_expkey, h); h 207 fs/nfsd/export.c struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); h 208 fs/nfsd/export.c struct svc_expkey *item = container_of(citem, struct svc_expkey, h); h 220 fs/nfsd/export.c struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); h 221 fs/nfsd/export.c struct svc_expkey *item = container_of(citem, struct svc_expkey, h); h 231 fs/nfsd/export.c return &i->h; h 281 fs/nfsd/export.c ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash); h 283 fs/nfsd/export.c return container_of(ch, struct svc_expkey, h); h 295 fs/nfsd/export.c ch = sunrpc_cache_update(cd, &new->h, &old->h, hash); h 297 fs/nfsd/export.c return container_of(ch, struct svc_expkey, h); h 325 fs/nfsd/export.c struct svc_export *exp = container_of(ref, struct svc_export, h.ref); h 334 fs/nfsd/export.c struct cache_head *h, h 338 fs/nfsd/export.c struct svc_export *exp = container_of(h, struct svc_export, h); h 568 fs/nfsd/export.c exp.h.expiry_time = get_expiry(&mesg); h 569 fs/nfsd/export.c if (exp.h.expiry_time == 0) h 576 fs/nfsd/export.c set_bit(CACHE_NEGATIVE, &exp.h.flags); h 626 fs/nfsd/export.c if (exp.h.expiry_time < seconds_since_boot()) h 673 fs/nfsd/export.c struct cache_head *h) h 677 fs/nfsd/export.c if (h ==NULL) { h 681 fs/nfsd/export.c exp = container_of(h, struct svc_export, h); h 686 fs/nfsd/export.c if (test_bit(CACHE_VALID, &h->flags) && h 687 fs/nfsd/export.c !test_bit(CACHE_NEGATIVE, &h->flags)) { h 706 fs/nfsd/export.c struct svc_export *orig = 
container_of(a, struct svc_export, h); h 707 fs/nfsd/export.c struct svc_export *new = container_of(b, struct svc_export, h); h 714 fs/nfsd/export.c struct svc_export *new = container_of(cnew, struct svc_export, h); h 715 fs/nfsd/export.c struct svc_export *item = container_of(citem, struct svc_export, h); h 731 fs/nfsd/export.c struct svc_export *new = container_of(cnew, struct svc_export, h); h 732 fs/nfsd/export.c struct svc_export *item = container_of(citem, struct svc_export, h); h 760 fs/nfsd/export.c return &i->h; h 796 fs/nfsd/export.c ch = sunrpc_cache_lookup_rcu(exp->cd, &exp->h, hash); h 798 fs/nfsd/export.c return container_of(ch, struct svc_export, h); h 809 fs/nfsd/export.c ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash); h 811 fs/nfsd/export.c return container_of(ch, struct svc_export, h); h 834 fs/nfsd/export.c err = cache_check(cd, &ek->h, reqp); h 857 fs/nfsd/export.c err = cache_check(cd, &exp->h, reqp); h 946 fs/nfsd/export.c cache_put(&ek->h, nn->svc_expkey_cache); h 1217 fs/nfsd/export.c struct svc_export *exp = container_of(cp, struct svc_export, h); h 1227 fs/nfsd/export.c if (cache_check(cd, &exp->h, NULL)) h 50 fs/nfsd/export.h struct cache_head h; h 72 fs/nfsd/export.h struct cache_head h; h 107 fs/nfsd/export.h cache_put(&exp->h, exp->cd); h 112 fs/nfsd/export.h cache_get(&exp->h); h 63 fs/nfsd/nfs4idmap.c struct cache_head h; h 79 fs/nfsd/nfs4idmap.c struct ent *new = container_of(cnew, struct ent, h); h 80 fs/nfsd/nfs4idmap.c struct ent *itm = container_of(citm, struct ent, h); h 92 fs/nfsd/nfs4idmap.c struct ent *map = container_of(ref, struct ent, h.ref); h 101 fs/nfsd/nfs4idmap.c return &e->h; h 129 fs/nfsd/nfs4idmap.c struct ent *ent = container_of(ch, struct ent, h); h 143 fs/nfsd/nfs4idmap.c struct ent *a = container_of(ca, struct ent, h); h 144 fs/nfsd/nfs4idmap.c struct ent *b = container_of(cb, struct ent, h); h 151 fs/nfsd/nfs4idmap.c idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) h 155 fs/nfsd/nfs4idmap.c if (h == NULL) { h 159 fs/nfsd/nfs4idmap.c ent = container_of(h, struct ent, h); h 163 fs/nfsd/nfs4idmap.c if (test_bit(CACHE_VALID, &h->flags)) h 235 fs/nfsd/nfs4idmap.c ent.h.expiry_time = get_expiry(&buf); h 236 fs/nfsd/nfs4idmap.c if (ent.h.expiry_time == 0) h 250 fs/nfsd/nfs4idmap.c set_bit(CACHE_NEGATIVE, &ent.h.flags); h 258 fs/nfsd/nfs4idmap.c cache_put(&res->h, cd); h 268 fs/nfsd/nfs4idmap.c struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h, h 271 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h); h 279 fs/nfsd/nfs4idmap.c struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h, h 282 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h); h 302 fs/nfsd/nfs4idmap.c struct ent *ent = container_of(ch, struct ent, h); h 314 fs/nfsd/nfs4idmap.c struct ent *a = container_of(ca, struct ent, h); h 315 fs/nfsd/nfs4idmap.c struct ent *b = container_of(cb, struct ent, h); h 322 fs/nfsd/nfs4idmap.c nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) h 326 fs/nfsd/nfs4idmap.c if (h == NULL) { h 330 fs/nfsd/nfs4idmap.c ent = container_of(h, struct ent, h); h 334 fs/nfsd/nfs4idmap.c if (test_bit(CACHE_VALID, &h->flags)) h 396 fs/nfsd/nfs4idmap.c ent.h.expiry_time = get_expiry(&buf); h 397 fs/nfsd/nfs4idmap.c if (ent.h.expiry_time == 0) h 405 fs/nfsd/nfs4idmap.c set_bit(CACHE_NEGATIVE, &ent.h.flags); h 415 fs/nfsd/nfs4idmap.c cache_put(&res->h, cd); h 426 fs/nfsd/nfs4idmap.c struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h, h 429 
fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h); h 437 fs/nfsd/nfs4idmap.c struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h, h 440 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h); h 502 fs/nfsd/nfs4idmap.c ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle); h 509 fs/nfsd/nfs4idmap.c cache_put(&(*item)->h, detail); h 544 fs/nfsd/nfs4idmap.c cache_put(&item->h, nn->nametoid_cache); h 585 fs/nfsd/nfs4idmap.c cache_put(&item->h, nn->idtoname_cache); h 93 fs/nilfs2/segbuf.h #define nilfs_for_each_segbuf_before(s, t, h) \ h 94 fs/nilfs2/segbuf.h for ((s) = NILFS_FIRST_SEGBUF(h); (s) != (t); \ h 19 fs/nls/nls_euc-jp.c #define IS_SJIS_JISX0208(h, l) ((((0x81 <= (h)) && ((h) <= 0x9F)) \ h 20 fs/nls/nls_euc-jp.c || ((0xE0 <= (h)) && ((h) <= 0xEA))) \ h 23 fs/nls/nls_euc-jp.c #define IS_SJIS_UDC_LOW(h, l) (((0xF0 <= (h)) && ((h) <= 0xF4)) \ h 25 fs/nls/nls_euc-jp.c #define IS_SJIS_UDC_HI(h, l) (((0xF5 <= (h)) && ((h) <= 0xF9)) \ h 27 fs/nls/nls_euc-jp.c #define IS_SJIS_IBM(h, l) (((0xFA <= (h)) && ((h) <= 0xFC)) \ h 29 fs/nls/nls_euc-jp.c #define IS_SJIS_NECIBM(h, l) (((0xED <= (h)) && ((h) <= 0xEE)) \ h 44 fs/nls/nls_euc-jp.c #define IS_EUC_JISX0208(h, l) (IS_EUC_BYTE(h) && IS_EUC_BYTE(l)) h 45 fs/nls/nls_euc-jp.c #define IS_EUC_JISX0201KANA(h, l) (((h) == SS2) && (0xA1 <= (l) && (l) <= 0xDF)) h 46 fs/nls/nls_euc-jp.c #define IS_EUC_UDC_LOW(h, l) (((0xF5 <= (h)) && ((h) <= 0xFE)) \ h 48 fs/nls/nls_euc-jp.c #define IS_EUC_UDC_HI(h, l) IS_EUC_UDC_LOW(h, l) /* G3 block */ h 141 fs/nls/nls_euc-jp.c #define IS_EUC_IBM2JISX0208(h, l) \ h 142 fs/nls/nls_euc-jp.c (((h) == 0xA2 && (l) == 0xCC) || ((h) == 0xA2 && (l) == 0xE8)) h 185 fs/orangefs/protocol.h * by <linux/xattr.h> h 67 fs/orangefs/xattr.c struct hlist_head *h; h 69 fs/orangefs/xattr.c h = &orangefs_inode->xattr_cache[xattr_key(key)]; h 70 fs/orangefs/xattr.c if (hlist_empty(h)) h 72 fs/orangefs/xattr.c hlist_for_each_entry_safe(cx, tmp, h, node) { h 250 fs/orangefs/xattr.c struct hlist_head *h; h 294 fs/orangefs/xattr.c h = &orangefs_inode->xattr_cache[xattr_key(name)]; h 295 fs/orangefs/xattr.c hlist_for_each_entry_safe(cx, tmp, h, node) { h 321 fs/orangefs/xattr.c struct hlist_head *h; h 385 fs/orangefs/xattr.c h = &orangefs_inode->xattr_cache[xattr_key(name)]; h 386 fs/orangefs/xattr.c hlist_for_each_entry_safe(cx, tmp, h, node) { h 535 fs/proc/proc_sysctl.c struct ctl_table_header *h = NULL; h 548 fs/proc/proc_sysctl.c p = lookup_entry(&h, ctl_dir, name->name, name->len); h 553 fs/proc/proc_sysctl.c ret = sysctl_follow_link(&h, &p); h 559 fs/proc/proc_sysctl.c inode = proc_sys_make_inode(dir->i_sb, h ? 
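The nls_euc-jp macros above classify a Shift-JIS byte pair purely by numeric ranges of the high (lead) and low bytes. The same lead-byte tests as functions, covering only the ranges quoted in the entries:

    #include <stdbool.h>
    #include <stdint.h>

    static bool sjis_lead_jisx0208(uint8_t h)
    {
        return (0x81 <= h && h <= 0x9F) || (0xE0 <= h && h <= 0xEA);
    }

    static bool sjis_lead_udc(uint8_t h)   /* user-defined characters */
    {
        return 0xF0 <= h && h <= 0xF9;     /* low block F0-F4, high F5-F9 */
    }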
h : head, p); h 569 fs/proc/proc_sysctl.c if (h) h 570 fs/proc/proc_sysctl.c sysctl_head_finish(h); h 781 fs/proc/proc_sysctl.c struct ctl_table_header *h = NULL; h 796 fs/proc/proc_sysctl.c for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) { h 797 fs/proc/proc_sysctl.c if (!scan(h, entry, &pos, file, ctx)) { h 798 fs/proc/proc_sysctl.c sysctl_head_finish(h); h 1579 fs/reiserfs/do_balan.c int get_left_neighbor_position(struct tree_balance *tb, int h) h 1581 fs/reiserfs/do_balan.c int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1); h 1583 fs/reiserfs/do_balan.c RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FL[h] == NULL, h 1585 fs/reiserfs/do_balan.c h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h)); h 1588 fs/reiserfs/do_balan.c return B_NR_ITEMS(tb->FL[h]); h 1593 fs/reiserfs/do_balan.c int get_right_neighbor_position(struct tree_balance *tb, int h) h 1595 fs/reiserfs/do_balan.c int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1); h 1597 fs/reiserfs/do_balan.c RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FR[h] == NULL, h 1599 fs/reiserfs/do_balan.c h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]); h 1601 fs/reiserfs/do_balan.c if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h))) h 1739 fs/reiserfs/do_balan.c int h; h 1742 fs/reiserfs/do_balan.c for (h = 1; tb->insert_size[h]; h++) { h 1743 fs/reiserfs/do_balan.c check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h), h 1745 fs/reiserfs/do_balan.c if (tb->lnum[h]) h 1746 fs/reiserfs/do_balan.c check_internal_node(tb->tb_sb, tb->L[h], "BAD L"); h 1747 fs/reiserfs/do_balan.c if (tb->rnum[h]) h 1748 fs/reiserfs/do_balan.c check_internal_node(tb->tb_sb, tb->R[h], "BAD R"); h 1851 fs/reiserfs/do_balan.c int h; /* level of the tree being processed */ h 1895 fs/reiserfs/do_balan.c for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++) h 1896 fs/reiserfs/do_balan.c child_pos = balance_internal(tb, h, child_pos, insert_key, h 51 fs/reiserfs/fix_node.c static void create_virtual_node(struct tree_balance *tb, int h) h 58 fs/reiserfs/fix_node.c Sh = PATH_H_PBUFFER(tb->tb_path, h); h 62 fs/reiserfs/fix_node.c MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h]; h 65 fs/reiserfs/fix_node.c if (h) { h 194 fs/reiserfs/fix_node.c static void check_left(struct tree_balance *tb, int h, int cur_free) h 204 fs/reiserfs/fix_node.c if (h > 0) { h 205 fs/reiserfs/fix_node.c tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE); h 213 fs/reiserfs/fix_node.c tb->lnum[h] = 0; h 280 fs/reiserfs/fix_node.c static void check_right(struct tree_balance *tb, int h, int cur_free) h 290 fs/reiserfs/fix_node.c if (h > 0) { h 291 fs/reiserfs/fix_node.c tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE); h 299 fs/reiserfs/fix_node.c tb->rnum[h] = 0; h 316 fs/reiserfs/fix_node.c tb->rnum[h] = vn->vn_nr_item; h 374 fs/reiserfs/fix_node.c static int get_num_ver(int mode, struct tree_balance *tb, int h, h 417 fs/reiserfs/fix_node.c RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE), h 420 fs/reiserfs/fix_node.c max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h)); h 430 fs/reiserfs/fix_node.c if (h > 0) { h 621 fs/reiserfs/fix_node.c static void set_parameters(struct tree_balance *tb, int h, int lnum, h 625 fs/reiserfs/fix_node.c tb->lnum[h] = lnum; h 626 fs/reiserfs/fix_node.c tb->rnum[h] = rnum; h 627 fs/reiserfs/fix_node.c tb->blknum[h] = blk_num; h 630 fs/reiserfs/fix_node.c if (h == 0) { h 641 fs/reiserfs/fix_node.c PROC_INFO_ADD(tb->tb_sb, lnum[h], lnum); h 642 fs/reiserfs/fix_node.c 
PROC_INFO_ADD(tb->tb_sb, rnum[h], rnum); h 644 fs/reiserfs/fix_node.c PROC_INFO_ADD(tb->tb_sb, lbytes[h], lb); h 645 fs/reiserfs/fix_node.c PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb); h 759 fs/reiserfs/fix_node.c if (h)\ h 766 fs/reiserfs/fix_node.c set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\ h 771 fs/reiserfs/fix_node.c set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\ h 774 fs/reiserfs/fix_node.c set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\ h 779 fs/reiserfs/fix_node.c if (h)\ h 785 fs/reiserfs/fix_node.c set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\ h 790 fs/reiserfs/fix_node.c set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\ h 793 fs/reiserfs/fix_node.c set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\ h 827 fs/reiserfs/fix_node.c static int get_empty_nodes(struct tree_balance *tb, int h) h 829 fs/reiserfs/fix_node.c struct buffer_head *new_bh, *Sh = PATH_H_PBUFFER(tb->tb_path, h); h 858 fs/reiserfs/fix_node.c counter < h; counter++) h 865 fs/reiserfs/fix_node.c amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1; h 915 fs/reiserfs/fix_node.c static int get_lfree(struct tree_balance *tb, int h) h 920 fs/reiserfs/fix_node.c if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL || h 921 fs/reiserfs/fix_node.c (l = tb->FL[h]) == NULL) h 925 fs/reiserfs/fix_node.c order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1; h 938 fs/reiserfs/fix_node.c static int get_rfree(struct tree_balance *tb, int h) h 943 fs/reiserfs/fix_node.c if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL || h 944 fs/reiserfs/fix_node.c (r = tb->FR[h]) == NULL) h 948 fs/reiserfs/fix_node.c order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1; h 959 fs/reiserfs/fix_node.c static int is_left_neighbor_in_cache(struct tree_balance *tb, int h) h 967 fs/reiserfs/fix_node.c if (!tb->FL[h]) h 971 fs/reiserfs/fix_node.c father = PATH_H_PBUFFER(tb->tb_path, h + 1); h 975 fs/reiserfs/fix_node.c !B_IS_IN_TREE(tb->FL[h]) || h 977 fs/reiserfs/fix_node.c !buffer_uptodate(tb->FL[h]), h 979 fs/reiserfs/fix_node.c father, tb->FL[h]); h 985 fs/reiserfs/fix_node.c left_neighbor_position = (father == tb->FL[h]) ? h 986 fs/reiserfs/fix_node.c tb->lkey[h] : B_NR_ITEMS(tb->FL[h]); h 989 fs/reiserfs/fix_node.c B_N_CHILD_NUM(tb->FL[h], left_neighbor_position); h 1024 fs/reiserfs/fix_node.c int h, h 1035 fs/reiserfs/fix_node.c path_offset = PATH_H_PATH_OFFSET(path, h); h 1129 fs/reiserfs/fix_node.c LEFT_PARENTS) ? (tb->lkey[h - 1] = h 1131 fs/reiserfs/fix_node.c 1) : (tb->rkey[h - h 1140 fs/reiserfs/fix_node.c h + 1) == IO_ERROR) h 1152 fs/reiserfs/fix_node.c RFALSE(B_LEVEL(*pfather) != h + 1, h 1172 fs/reiserfs/fix_node.c static int get_parents(struct tree_balance *tb, int h) h 1177 fs/reiserfs/fix_node.c path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h); h 1187 fs/reiserfs/fix_node.c brelse(tb->FL[h]); h 1188 fs/reiserfs/fix_node.c brelse(tb->CFL[h]); h 1189 fs/reiserfs/fix_node.c brelse(tb->FR[h]); h 1190 fs/reiserfs/fix_node.c brelse(tb->CFR[h]); h 1191 fs/reiserfs/fix_node.c tb->FL[h] = NULL; h 1192 fs/reiserfs/fix_node.c tb->CFL[h] = NULL; h 1193 fs/reiserfs/fix_node.c tb->FR[h] = NULL; h 1194 fs/reiserfs/fix_node.c tb->CFR[h] = NULL; h 1206 fs/reiserfs/fix_node.c tb->lkey[h] = position - 1; h 1216 fs/reiserfs/fix_node.c if ((ret = get_far_parent(tb, h + 1, &curf, h 1222 fs/reiserfs/fix_node.c brelse(tb->FL[h]); h 1223 fs/reiserfs/fix_node.c tb->FL[h] = curf; /* New initialization of FL[h]. 
*/ h 1224 fs/reiserfs/fix_node.c brelse(tb->CFL[h]); h 1225 fs/reiserfs/fix_node.c tb->CFL[h] = curcf; /* New initialization of CFL[h]. */ h 1234 fs/reiserfs/fix_node.c if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) { h 1242 fs/reiserfs/fix_node.c get_far_parent(tb, h + 1, &curf, &curcf, h 1251 fs/reiserfs/fix_node.c tb->rkey[h] = position; h 1254 fs/reiserfs/fix_node.c brelse(tb->FR[h]); h 1256 fs/reiserfs/fix_node.c tb->FR[h] = curf; h 1258 fs/reiserfs/fix_node.c brelse(tb->CFR[h]); h 1260 fs/reiserfs/fix_node.c tb->CFR[h] = curcf; h 1274 fs/reiserfs/fix_node.c struct tree_balance *tb, int h) h 1276 fs/reiserfs/fix_node.c struct buffer_head *Sh = PATH_H_PBUFFER(tb->tb_path, h); h 1277 fs/reiserfs/fix_node.c int levbytes = tb->insert_size[h]; h 1282 fs/reiserfs/fix_node.c if (tb->CFR[h]) h 1283 fs/reiserfs/fix_node.c r_key = internal_key(tb->CFR[h], tb->rkey[h]); h 1288 fs/reiserfs/fix_node.c ((!h h 1291 fs/reiserfs/fix_node.c ((!h && r_key h 1293 fs/reiserfs/fix_node.c + ((h) ? KEY_SIZE : 0)) { h 1297 fs/reiserfs/fix_node.c if (!h) h 1301 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); h 1305 fs/reiserfs/fix_node.c PROC_INFO_INC(tb->tb_sb, can_node_be_removed[h]); h 1324 fs/reiserfs/fix_node.c static int ip_check_balance(struct tree_balance *tb, int h) h 1373 fs/reiserfs/fix_node.c Sh = PATH_H_PBUFFER(tb->tb_path, h); h 1374 fs/reiserfs/fix_node.c levbytes = tb->insert_size[h]; h 1378 fs/reiserfs/fix_node.c if (!h) h 1381 fs/reiserfs/fix_node.c switch (ret = get_empty_nodes(tb, h)) { h 1384 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); h 1397 fs/reiserfs/fix_node.c ret = get_parents(tb, h); h 1404 fs/reiserfs/fix_node.c rfree = get_rfree(tb, h); h 1405 fs/reiserfs/fix_node.c lfree = get_lfree(tb, h); h 1408 fs/reiserfs/fix_node.c if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) == h 1412 fs/reiserfs/fix_node.c create_virtual_node(tb, h); h 1420 fs/reiserfs/fix_node.c check_left(tb, h, lfree); h 1428 fs/reiserfs/fix_node.c check_right(tb, h, rfree); h 1434 fs/reiserfs/fix_node.c if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) { h 1446 fs/reiserfs/fix_node.c ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] + h 1448 fs/reiserfs/fix_node.c tb->rnum[h]); h 1449 fs/reiserfs/fix_node.c set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, h 1458 fs/reiserfs/fix_node.c RFALSE(h && h 1459 fs/reiserfs/fix_node.c (tb->lnum[h] >= vn->vn_nr_item + 1 || h 1460 fs/reiserfs/fix_node.c tb->rnum[h] >= vn->vn_nr_item + 1), h 1462 fs/reiserfs/fix_node.c RFALSE(!h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) || h 1463 fs/reiserfs/fix_node.c (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))), h 1470 fs/reiserfs/fix_node.c if (!h && is_leaf_removable(tb)) h 1482 fs/reiserfs/fix_node.c if (!h) h 1484 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); h 1512 fs/reiserfs/fix_node.c lpar = tb->lnum[h]; h 1513 fs/reiserfs/fix_node.c rpar = tb->rnum[h]; h 1523 fs/reiserfs/fix_node.c nver = get_num_ver(vn->vn_mode, tb, h, h 1524 fs/reiserfs/fix_node.c 0, -1, h ? vn->vn_nr_item : 0, -1, h 1527 fs/reiserfs/fix_node.c if (!h) { h 1534 fs/reiserfs/fix_node.c nver1 = get_num_ver(vn->vn_mode, tb, h, h 1550 fs/reiserfs/fix_node.c lnver = get_num_ver(vn->vn_mode, tb, h, h 1551 fs/reiserfs/fix_node.c lpar - ((h || tb->lbytes == -1) ? 0 : 1), h 1552 fs/reiserfs/fix_node.c -1, h ? 
vn->vn_nr_item : 0, -1, h 1554 fs/reiserfs/fix_node.c if (!h) { h 1557 fs/reiserfs/fix_node.c lnver1 = get_num_ver(vn->vn_mode, tb, h, h 1575 fs/reiserfs/fix_node.c rnver = get_num_ver(vn->vn_mode, tb, h, h 1577 fs/reiserfs/fix_node.c h ? (vn->vn_nr_item - rpar) : (rpar - h 1583 fs/reiserfs/fix_node.c if (!h) { h 1586 fs/reiserfs/fix_node.c rnver1 = get_num_ver(vn->vn_mode, tb, h, h 1605 fs/reiserfs/fix_node.c lrnver = get_num_ver(vn->vn_mode, tb, h, h 1606 fs/reiserfs/fix_node.c lpar - ((h || tb->lbytes == -1) ? 0 : 1), h 1608 fs/reiserfs/fix_node.c h ? (vn->vn_nr_item - rpar) : (rpar - h 1614 fs/reiserfs/fix_node.c if (!h) { h 1617 fs/reiserfs/fix_node.c lrnver1 = get_num_ver(vn->vn_mode, tb, h, h 1638 fs/reiserfs/fix_node.c RFALSE(h && h 1639 fs/reiserfs/fix_node.c (tb->lnum[h] != 1 || h 1640 fs/reiserfs/fix_node.c tb->rnum[h] != 1 || h 1642 fs/reiserfs/fix_node.c || h != 1), "vs-8230: bad h"); h 1644 fs/reiserfs/fix_node.c set_parameters(tb, h, tb->lnum[h], tb->rnum[h], h 1648 fs/reiserfs/fix_node.c set_parameters(tb, h, h 1649 fs/reiserfs/fix_node.c tb->lnum[h] - h 1651 fs/reiserfs/fix_node.c tb->rnum[h] - h 1663 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1, h 1695 fs/reiserfs/fix_node.c if (is_left_neighbor_in_cache(tb, h)) { h 1726 fs/reiserfs/fix_node.c static int dc_check_balance_internal(struct tree_balance *tb, int h) h 1738 fs/reiserfs/fix_node.c Sh = PATH_H_PBUFFER(tb->tb_path, h); h 1739 fs/reiserfs/fix_node.c Fh = PATH_H_PPARENT(tb->tb_path, h); h 1747 fs/reiserfs/fix_node.c create_virtual_node(tb, h); h 1752 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); h 1760 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 0, NULL, -1, -1); h 1764 fs/reiserfs/fix_node.c if ((ret = get_parents(tb, h)) != CARRY_ON) h 1768 fs/reiserfs/fix_node.c rfree = get_rfree(tb, h); h 1769 fs/reiserfs/fix_node.c lfree = get_lfree(tb, h); h 1772 fs/reiserfs/fix_node.c check_left(tb, h, lfree); h 1773 fs/reiserfs/fix_node.c check_right(tb, h, rfree); h 1786 fs/reiserfs/fix_node.c if (tb->lnum[h] >= vn->vn_nr_item + 1) { h 1793 fs/reiserfs/fix_node.c h)) == h 1794 fs/reiserfs/fix_node.c 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1; h 1795 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / h 1797 fs/reiserfs/fix_node.c set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, h 1803 fs/reiserfs/fix_node.c if (tb->rnum[h] >= vn->vn_nr_item + 1) { h 1810 fs/reiserfs/fix_node.c h)) == h 1812 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / h 1814 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, h 1824 fs/reiserfs/fix_node.c if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) { h 1828 fs/reiserfs/fix_node.c ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - h 1829 fs/reiserfs/fix_node.c tb->rnum[h] + vn->vn_nr_item + 1) / 2 - h 1830 fs/reiserfs/fix_node.c (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]); h 1831 fs/reiserfs/fix_node.c set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, h 1837 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); h 1846 fs/reiserfs/fix_node.c if (tb->lnum[h] >= vn->vn_nr_item + 1) h 1847 fs/reiserfs/fix_node.c if (is_left_neighbor_in_cache(tb, h) h 1848 fs/reiserfs/fix_node.c || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h]) { h 1855 fs/reiserfs/fix_node.c h)) == h 1856 fs/reiserfs/fix_node.c 0) ? 
B_NR_ITEMS(tb->FL[h]) : n - 1; h 1857 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE + h 1859 fs/reiserfs/fix_node.c set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1); h 1864 fs/reiserfs/fix_node.c if (tb->rnum[h] >= vn->vn_nr_item + 1) { h 1871 fs/reiserfs/fix_node.c h)) == B_NR_ITEMS(Fh)) ? 0 : (n + 1); h 1872 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE + h 1874 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1); h 1879 fs/reiserfs/fix_node.c if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) { h 1883 fs/reiserfs/fix_node.c ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] + h 1885 fs/reiserfs/fix_node.c tb->rnum[h]); h 1886 fs/reiserfs/fix_node.c set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, h 1892 fs/reiserfs/fix_node.c RFALSE(!tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root"); h 1895 fs/reiserfs/fix_node.c if (is_left_neighbor_in_cache(tb, h) || !tb->FR[h]) { h 1899 fs/reiserfs/fix_node.c (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item + h 1901 fs/reiserfs/fix_node.c set_parameters(tb, h, -from_l, 0, 1, NULL, -1, -1); h 1905 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, h 1906 fs/reiserfs/fix_node.c -((MAX_NR_KEY(Sh) + 1 - tb->rnum[h] + vn->vn_nr_item + h 1925 fs/reiserfs/fix_node.c static int dc_check_balance_leaf(struct tree_balance *tb, int h) h 1950 fs/reiserfs/fix_node.c levbytes = tb->insert_size[h]; h 1959 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); h 1963 fs/reiserfs/fix_node.c if ((ret = get_parents(tb, h)) != CARRY_ON) h 1967 fs/reiserfs/fix_node.c rfree = get_rfree(tb, h); h 1968 fs/reiserfs/fix_node.c lfree = get_lfree(tb, h); h 1970 fs/reiserfs/fix_node.c create_virtual_node(tb, h); h 1982 fs/reiserfs/fix_node.c check_left(tb, h, lfree); h 1983 fs/reiserfs/fix_node.c check_right(tb, h, rfree); h 1987 fs/reiserfs/fix_node.c if (is_left_neighbor_in_cache(tb, h) || ((tb->rnum[0] - ((tb->rbytes == -1) ? 
0 : 1)) < vn->vn_nr_item) || /* S can not be merged with R */ h 1988 fs/reiserfs/fix_node.c !tb->FR[h]) { h 1990 fs/reiserfs/fix_node.c RFALSE(!tb->FL[h], h 1994 fs/reiserfs/fix_node.c set_parameters(tb, h, -1, 0, 0, NULL, -1, -1); h 2000 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, -1, 0, NULL, -1, -1); h 2013 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); h 2031 fs/reiserfs/fix_node.c static int dc_check_balance(struct tree_balance *tb, int h) h 2033 fs/reiserfs/fix_node.c RFALSE(!(PATH_H_PBUFFER(tb->tb_path, h)), h 2036 fs/reiserfs/fix_node.c if (h) h 2037 fs/reiserfs/fix_node.c return dc_check_balance_internal(tb, h); h 2039 fs/reiserfs/fix_node.c return dc_check_balance_leaf(tb, h); h 2063 fs/reiserfs/fix_node.c int h, h 2082 fs/reiserfs/fix_node.c if (tb->insert_size[h] > 0) h 2083 fs/reiserfs/fix_node.c return ip_check_balance(tb, h); h 2086 fs/reiserfs/fix_node.c return dc_check_balance(tb, h); h 2090 fs/reiserfs/fix_node.c static int get_direct_parent(struct tree_balance *tb, int h) h 2095 fs/reiserfs/fix_node.c path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h); h 2151 fs/reiserfs/fix_node.c static int get_neighbors(struct tree_balance *tb, int h) h 2154 fs/reiserfs/fix_node.c path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1); h 2160 fs/reiserfs/fix_node.c PROC_INFO_INC(sb, get_neighbors[h]); h 2162 fs/reiserfs/fix_node.c if (tb->lnum[h]) { h 2164 fs/reiserfs/fix_node.c PROC_INFO_INC(sb, need_l_neighbor[h]); h 2167 fs/reiserfs/fix_node.c RFALSE(bh == tb->FL[h] && h 2173 fs/reiserfs/fix_node.c tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb-> h 2174 fs/reiserfs/fix_node.c FL[h]); h 2175 fs/reiserfs/fix_node.c son_number = B_N_CHILD_NUM(tb->FL[h], child_position); h 2183 fs/reiserfs/fix_node.c PROC_INFO_INC(sb, get_neighbors_restart[h]); h 2187 fs/reiserfs/fix_node.c RFALSE(!B_IS_IN_TREE(tb->FL[h]) || h 2188 fs/reiserfs/fix_node.c child_position > B_NR_ITEMS(tb->FL[h]) || h 2189 fs/reiserfs/fix_node.c B_N_CHILD_NUM(tb->FL[h], child_position) != h 2192 fs/reiserfs/fix_node.c RFALSE(!h && h 2198 fs/reiserfs/fix_node.c brelse(tb->L[h]); h 2199 fs/reiserfs/fix_node.c tb->L[h] = bh; h 2203 fs/reiserfs/fix_node.c if (tb->rnum[h]) { h 2204 fs/reiserfs/fix_node.c PROC_INFO_INC(sb, need_r_neighbor[h]); h 2207 fs/reiserfs/fix_node.c RFALSE(bh == tb->FR[h] && h 2214 fs/reiserfs/fix_node.c (bh == tb->FR[h]) ? 
tb->rkey[h] + 1 : 0; h 2215 fs/reiserfs/fix_node.c son_number = B_N_CHILD_NUM(tb->FR[h], child_position); h 2223 fs/reiserfs/fix_node.c PROC_INFO_INC(sb, get_neighbors_restart[h]); h 2226 fs/reiserfs/fix_node.c brelse(tb->R[h]); h 2227 fs/reiserfs/fix_node.c tb->R[h] = bh; h 2229 fs/reiserfs/fix_node.c RFALSE(!h h 2548 fs/reiserfs/fix_node.c int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path); h 2630 fs/reiserfs/fix_node.c for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) { h 2631 fs/reiserfs/fix_node.c ret = get_direct_parent(tb, h); h 2635 fs/reiserfs/fix_node.c ret = check_balance(op_mode, tb, h, item_num, h 2640 fs/reiserfs/fix_node.c ret = get_neighbors(tb, h); h 2643 fs/reiserfs/fix_node.c if (h != MAX_HEIGHT - 1) h 2644 fs/reiserfs/fix_node.c tb->insert_size[h + 1] = 0; h 2654 fs/reiserfs/fix_node.c ret = get_neighbors(tb, h); h 2662 fs/reiserfs/fix_node.c ret = get_empty_nodes(tb, h); h 2670 fs/reiserfs/fix_node.c if (!PATH_H_PBUFFER(tb->tb_path, h)) { h 2672 fs/reiserfs/fix_node.c RFALSE(tb->blknum[h] != 1, h 2675 fs/reiserfs/fix_node.c if (h < MAX_HEIGHT - 1) h 2676 fs/reiserfs/fix_node.c tb->insert_size[h + 1] = 0; h 2677 fs/reiserfs/fix_node.c } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) { h 2684 fs/reiserfs/fix_node.c if (tb->blknum[h] > 1) { h 2686 fs/reiserfs/fix_node.c RFALSE(h == MAX_HEIGHT - 1, h 2689 fs/reiserfs/fix_node.c tb->insert_size[h + 1] = h 2691 fs/reiserfs/fix_node.c KEY_SIZE) * (tb->blknum[h] - 1) + h 2693 fs/reiserfs/fix_node.c } else if (h < MAX_HEIGHT - 1) h 2694 fs/reiserfs/fix_node.c tb->insert_size[h + 1] = 0; h 2696 fs/reiserfs/fix_node.c tb->insert_size[h + 1] = h 2697 fs/reiserfs/fix_node.c (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1); h 29 fs/reiserfs/ibalance.c int h, h 42 fs/reiserfs/ibalance.c src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); h 43 fs/reiserfs/ibalance.c src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); h 44 fs/reiserfs/ibalance.c src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); h 46 fs/reiserfs/ibalance.c dest_bi->bi_bh = tb->L[h]; h 47 fs/reiserfs/ibalance.c dest_bi->bi_parent = tb->FL[h]; h 48 fs/reiserfs/ibalance.c dest_bi->bi_position = get_left_neighbor_position(tb, h); h 49 fs/reiserfs/ibalance.c *d_key = tb->lkey[h]; h 50 fs/reiserfs/ibalance.c *cf = tb->CFL[h]; h 54 fs/reiserfs/ibalance.c src_bi->bi_bh = tb->L[h]; h 55 fs/reiserfs/ibalance.c src_bi->bi_parent = tb->FL[h]; h 56 fs/reiserfs/ibalance.c src_bi->bi_position = get_left_neighbor_position(tb, h); h 58 fs/reiserfs/ibalance.c dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); h 59 fs/reiserfs/ibalance.c dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); h 61 fs/reiserfs/ibalance.c dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); h 62 fs/reiserfs/ibalance.c *d_key = tb->lkey[h]; h 63 fs/reiserfs/ibalance.c *cf = tb->CFL[h]; h 69 fs/reiserfs/ibalance.c src_bi->bi_bh = tb->R[h]; h 70 fs/reiserfs/ibalance.c src_bi->bi_parent = tb->FR[h]; h 71 fs/reiserfs/ibalance.c src_bi->bi_position = get_right_neighbor_position(tb, h); h 73 fs/reiserfs/ibalance.c dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); h 74 fs/reiserfs/ibalance.c dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); h 75 fs/reiserfs/ibalance.c dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); h 76 fs/reiserfs/ibalance.c *d_key = tb->rkey[h]; h 77 fs/reiserfs/ibalance.c *cf = tb->CFR[h]; h 82 fs/reiserfs/ibalance.c src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); h 83 fs/reiserfs/ibalance.c src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); h 84 fs/reiserfs/ibalance.c 
src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); h 86 fs/reiserfs/ibalance.c dest_bi->bi_bh = tb->R[h]; h 87 fs/reiserfs/ibalance.c dest_bi->bi_parent = tb->FR[h]; h 88 fs/reiserfs/ibalance.c dest_bi->bi_position = get_right_neighbor_position(tb, h); h 89 fs/reiserfs/ibalance.c *d_key = tb->rkey[h]; h 90 fs/reiserfs/ibalance.c *cf = tb->CFR[h]; h 95 fs/reiserfs/ibalance.c dest_bi->bi_bh = tb->L[h]; h 96 fs/reiserfs/ibalance.c dest_bi->bi_parent = tb->FL[h]; h 97 fs/reiserfs/ibalance.c dest_bi->bi_position = get_left_neighbor_position(tb, h); h 102 fs/reiserfs/ibalance.c dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); h 103 fs/reiserfs/ibalance.c dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); h 104 fs/reiserfs/ibalance.c dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); h 109 fs/reiserfs/ibalance.c dest_bi->bi_bh = tb->R[h]; h 110 fs/reiserfs/ibalance.c dest_bi->bi_parent = tb->FR[h]; h 111 fs/reiserfs/ibalance.c dest_bi->bi_position = get_right_neighbor_position(tb, h); h 494 fs/reiserfs/ibalance.c int h, int pointer_amount) h 500 fs/reiserfs/ibalance.c internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi, h 535 fs/reiserfs/ibalance.c int h, int pointer_amount) h 541 fs/reiserfs/ibalance.c internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, h 566 fs/reiserfs/ibalance.c int h, int pointer_amount) h 573 fs/reiserfs/ibalance.c internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi, h 585 fs/reiserfs/ibalance.c RFALSE(src_bi.bi_bh != PATH_H_PBUFFER(tb->tb_path, h) /*tb->S[h] */ || h 586 fs/reiserfs/ibalance.c dest_bi.bi_bh != tb->R[h], h 588 fs/reiserfs/ibalance.c src_bi.bi_bh, PATH_H_PBUFFER(tb->tb_path, h)); h 590 fs/reiserfs/ibalance.c if (tb->CFL[h]) h 591 fs/reiserfs/ibalance.c replace_key(tb, cf, d_key_position, tb->CFL[h], h 592 fs/reiserfs/ibalance.c tb->lkey[h]); h 610 fs/reiserfs/ibalance.c int h, int pointer_amount) h 616 fs/reiserfs/ibalance.c internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, h 633 fs/reiserfs/ibalance.c int h, int child_pos) h 637 fs/reiserfs/ibalance.c struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h); h 640 fs/reiserfs/ibalance.c insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE)); h 645 fs/reiserfs/ibalance.c bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h); h 646 fs/reiserfs/ibalance.c bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1); h 650 fs/reiserfs/ibalance.c RFALSE(tb->blknum[h] > 1, h 651 fs/reiserfs/ibalance.c "tb->blknum[%d]=%d when insert_size < 0", h, tb->blknum[h]); h 655 fs/reiserfs/ibalance.c if (tb->lnum[h] == 0 && tb->rnum[h] == 0) { h 656 fs/reiserfs/ibalance.c if (tb->blknum[h] == 0) { h 668 fs/reiserfs/ibalance.c if (!tb->L[h - 1] || !B_NR_ITEMS(tb->L[h - 1])) h 669 fs/reiserfs/ibalance.c new_root = tb->R[h - 1]; h 671 fs/reiserfs/ibalance.c new_root = tb->L[h - 1]; h 685 fs/reiserfs/ibalance.c if (h > 1) h 697 fs/reiserfs/ibalance.c if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) { h 699 fs/reiserfs/ibalance.c RFALSE(tb->rnum[h] != 0, h 701 fs/reiserfs/ibalance.c h, tb->rnum[h]); h 703 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1); h 710 fs/reiserfs/ibalance.c if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) { h 711 fs/reiserfs/ibalance.c RFALSE(tb->lnum[h] != 0, h 713 fs/reiserfs/ibalance.c h, tb->lnum[h]); h 715 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1); h 722 fs/reiserfs/ibalance.c if (tb->lnum[h] < 0) { h 723 fs/reiserfs/ibalance.c 
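
A note on the balance_internal_when_delete() fragment above: lnum[h]/rnum[h] carry a sentinel encoding, where -B_NR_ITEMS(neighbor) - 1 means "empty S[h] entirely into that neighbor", and the RFALSE()s insist the opposite count stays 0. A toy user-space sketch of just that decision; the names and item counts here are illustrative, not reiserfs API:

/*
 * Toy sketch of the sentinel test in balance_internal_when_delete()
 * above: lnum == -(n_left + 1) encodes "empty S[h] into L[h]", and
 * the kernel's RFALSE()s require the opposite count to be zero.
 * Names and values are illustrative, not the reiserfs API.
 */
#include <stdio.h>

enum join { KEEP, JOIN_LEFT, JOIN_RIGHT };

static enum join pick_join(int lnum, int rnum, int n_left, int n_right)
{
	if (lnum == -n_left - 1 && rnum == 0)
		return JOIN_LEFT;
	if (rnum == -n_right - 1 && lnum == 0)
		return JOIN_RIGHT;
	return KEEP;
}

int main(void)
{
	/* left sibling holds 3 items; lnum == -4 selects a full join */
	printf("%d\n", pick_join(-4, 0, 3, 7)); /* prints 1 (JOIN_LEFT) */
	return 0;
}
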
RFALSE(tb->rnum[h] != 0, h 724 fs/reiserfs/ibalance.c "wrong tb->rnum[%d]==%d when borrow from L[h]", h, h 725 fs/reiserfs/ibalance.c tb->rnum[h]); h 726 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_L_TO_S, tb, h, h 727 fs/reiserfs/ibalance.c -tb->lnum[h]); h 732 fs/reiserfs/ibalance.c if (tb->rnum[h] < 0) { h 733 fs/reiserfs/ibalance.c RFALSE(tb->lnum[h] != 0, h 735 fs/reiserfs/ibalance.c h, tb->lnum[h]); h 736 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]); /*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]); */ h 741 fs/reiserfs/ibalance.c if (tb->lnum[h] > 0) { h 742 fs/reiserfs/ibalance.c RFALSE(tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1, h 744 fs/reiserfs/ibalance.c h, tb->lnum[h], h, tb->rnum[h], n); h 746 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]); /*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]); */ h 747 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, h 748 fs/reiserfs/ibalance.c tb->rnum[h]); h 756 fs/reiserfs/ibalance.c h, tb->lnum[h], h, tb->rnum[h]); h 760 fs/reiserfs/ibalance.c static void replace_lkey(struct tree_balance *tb, int h, struct item_head *key) h 762 fs/reiserfs/ibalance.c RFALSE(tb->L[h] == NULL || tb->CFL[h] == NULL, h 764 fs/reiserfs/ibalance.c tb->L[h], tb->CFL[h]); h 766 fs/reiserfs/ibalance.c if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0) h 769 fs/reiserfs/ibalance.c memcpy(internal_key(tb->CFL[h], tb->lkey[h]), key, KEY_SIZE); h 771 fs/reiserfs/ibalance.c do_balance_mark_internal_dirty(tb, tb->CFL[h], 0); h 775 fs/reiserfs/ibalance.c static void replace_rkey(struct tree_balance *tb, int h, struct item_head *key) h 777 fs/reiserfs/ibalance.c RFALSE(tb->R[h] == NULL || tb->CFR[h] == NULL, h 779 fs/reiserfs/ibalance.c tb->R[h], tb->CFR[h]); h 780 fs/reiserfs/ibalance.c RFALSE(B_NR_ITEMS(tb->R[h]) == 0, h 782 fs/reiserfs/ibalance.c B_NR_ITEMS(tb->R[h])); h 784 fs/reiserfs/ibalance.c memcpy(internal_key(tb->CFR[h], tb->rkey[h]), key, KEY_SIZE); h 786 fs/reiserfs/ibalance.c do_balance_mark_internal_dirty(tb, tb->CFR[h], 0); h 804 fs/reiserfs/ibalance.c int h, /* level of the tree */ h 811 fs/reiserfs/ibalance.c struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h); h 825 fs/reiserfs/ibalance.c RFALSE(h < 1, "h (%d) can not be < 1 on internal level", h); h 827 fs/reiserfs/ibalance.c PROC_INFO_INC(tb->tb_sb, balance_at[h]); h 831 fs/reiserfs/ibalance.c h + 1) /*tb->S[h]->b_item_order */ : 0; h 837 fs/reiserfs/ibalance.c insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE)); h 843 fs/reiserfs/ibalance.c RFALSE(h > 1 && (insert_num > 1 || insert_num < -1), h 845 fs/reiserfs/ibalance.c insert_num, h); h 849 fs/reiserfs/ibalance.c balance_internal_when_delete(tb, h, child_pos); h 854 fs/reiserfs/ibalance.c if (tb->lnum[h] > 0) { h 860 fs/reiserfs/ibalance.c n = B_NR_ITEMS(tb->L[h]); /* number of items in L[h] */ h 861 fs/reiserfs/ibalance.c if (tb->lnum[h] <= child_pos) { h 863 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, h 864 fs/reiserfs/ibalance.c tb->lnum[h]); h 865 fs/reiserfs/ibalance.c child_pos -= tb->lnum[h]; h 866 fs/reiserfs/ibalance.c } else if (tb->lnum[h] > child_pos + insert_num) { h 868 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, h 869 fs/reiserfs/ibalance.c tb->lnum[h] - insert_num); h 872 fs/reiserfs/ibalance.c bi.bi_bh = tb->L[h]; h 873 fs/reiserfs/ibalance.c bi.bi_parent = tb->FL[h]; h 874 
fs/reiserfs/ibalance.c bi.bi_position = get_left_neighbor_position(tb, h); h 889 fs/reiserfs/ibalance.c internal_shift1_left(tb, h, child_pos + 1); h 891 fs/reiserfs/ibalance.c k = tb->lnum[h] - child_pos - 1; h 893 fs/reiserfs/ibalance.c bi.bi_bh = tb->L[h]; h 894 fs/reiserfs/ibalance.c bi.bi_parent = tb->FL[h]; h 895 fs/reiserfs/ibalance.c bi.bi_position = get_left_neighbor_position(tb, h); h 901 fs/reiserfs/ibalance.c replace_lkey(tb, h, insert_key + k); h 923 fs/reiserfs/ibalance.c if (tb->rnum[h] > 0) { h 930 fs/reiserfs/ibalance.c if (n - tb->rnum[h] >= child_pos) h 932 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, h 933 fs/reiserfs/ibalance.c tb->rnum[h]); h 934 fs/reiserfs/ibalance.c else if (n + insert_num - tb->rnum[h] < child_pos) { h 936 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, h 937 fs/reiserfs/ibalance.c tb->rnum[h] - insert_num); h 941 fs/reiserfs/ibalance.c bi.bi_bh = tb->R[h]; h 942 fs/reiserfs/ibalance.c bi.bi_parent = tb->FR[h]; h 943 fs/reiserfs/ibalance.c bi.bi_position = get_right_neighbor_position(tb, h); h 947 fs/reiserfs/ibalance.c tb->rnum[h] - 1, h 955 fs/reiserfs/ibalance.c internal_shift1_right(tb, h, n - child_pos + 1); h 957 fs/reiserfs/ibalance.c k = tb->rnum[h] - n + child_pos - 1; h 959 fs/reiserfs/ibalance.c bi.bi_bh = tb->R[h]; h 960 fs/reiserfs/ibalance.c bi.bi_parent = tb->FR[h]; h 961 fs/reiserfs/ibalance.c bi.bi_position = get_right_neighbor_position(tb, h); h 967 fs/reiserfs/ibalance.c replace_rkey(tb, h, insert_key + insert_num - k - 1); h 973 fs/reiserfs/ibalance.c dc = B_N_CHILD(tb->R[h], 0); h 983 fs/reiserfs/ibalance.c do_balance_mark_internal_dirty(tb, tb->R[h], 0); h 990 fs/reiserfs/ibalance.c RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level"); h 991 fs/reiserfs/ibalance.c RFALSE(tb->blknum[h] < 0, "blknum can not be < 0"); h 993 fs/reiserfs/ibalance.c if (!tb->blknum[h]) { /* node S[h] is empty now */ h 1004 fs/reiserfs/ibalance.c struct buffer_head *tbSh_1 = PATH_H_PBUFFER(tb->tb_path, h - 1); h 1007 fs/reiserfs/ibalance.c if (tb->blknum[h] != 1) h 1013 fs/reiserfs/ibalance.c set_blkh_level(blkh, h + 1); h 1022 fs/reiserfs/ibalance.c tb->insert_size[h] -= DC_SIZE; h 1041 fs/reiserfs/ibalance.c if (tb->blknum[h] == 2) { h 1048 fs/reiserfs/ibalance.c set_blkh_level(B_BLK_HEAD(S_new), h + 1); h 1056 fs/reiserfs/ibalance.c src_bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h); h 1057 fs/reiserfs/ibalance.c src_bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1); h 1148 fs/reiserfs/ibalance.c bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h); h 1149 fs/reiserfs/ibalance.c bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1); h 57 fs/reiserfs/journal.c #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ h 59 fs/reiserfs/journal.c #define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ h 642 fs/reiserfs/prints.c int h = 0; h 658 fs/reiserfs/prints.c for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) { h 659 fs/reiserfs/prints.c if (PATH_H_PATH_OFFSET(tb->tb_path, h) <= h 662 fs/reiserfs/prints.c h) > ILLEGAL_PATH_ELEMENT_OFFSET) { h 663 fs/reiserfs/prints.c tbSh = PATH_H_PBUFFER(tb->tb_path, h); h 664 fs/reiserfs/prints.c tbFh = PATH_H_PPARENT(tb->tb_path, h); h 671 fs/reiserfs/prints.c h, h 674 fs/reiserfs/prints.c (tb->L[h]) ? (long long)(tb->L[h]->b_blocknr) : (-1LL), h 675 fs/reiserfs/prints.c (tb->L[h]) ? atomic_read(&tb->L[h]->b_count) : -1, h 676 fs/reiserfs/prints.c (tb->R[h]) ? 
(long long)(tb->R[h]->b_blocknr) : (-1LL), h 677 fs/reiserfs/prints.c (tb->R[h]) ? atomic_read(&tb->R[h]->b_count) : -1, h 679 fs/reiserfs/prints.c (tb->FL[h]) ? (long long)(tb->FL[h]-> h 681 fs/reiserfs/prints.c (tb->FR[h]) ? (long long)(tb->FR[h]-> h 683 fs/reiserfs/prints.c (tb->CFL[h]) ? (long long)(tb->CFL[h]-> h 685 fs/reiserfs/prints.c (tb->CFR[h]) ? (long long)(tb->CFR[h]-> h 699 fs/reiserfs/prints.c h = 0; h 701 fs/reiserfs/prints.c h++; h 704 fs/reiserfs/prints.c h, tb->insert_size[h], tb->lnum[h], tb->rnum[h], h 705 fs/reiserfs/prints.c tb->blknum[h]); h 706 fs/reiserfs/prints.c } while (tb->insert_size[h]); h 713 fs/reiserfs/prints.c h = 0; h 2170 fs/reiserfs/reiserfs.h #define PATH_H_PBUFFER(path, h) \ h 2171 fs/reiserfs/reiserfs.h PATH_OFFSET_PBUFFER(path, path->path_length - (h)) h 2174 fs/reiserfs/reiserfs.h #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER(path, (h) + 1) h 2176 fs/reiserfs/reiserfs.h #define PATH_H_POSITION(path, h) \ h 2177 fs/reiserfs/reiserfs.h PATH_OFFSET_POSITION(path, path->path_length - (h)) h 2180 fs/reiserfs/reiserfs.h #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) h 3253 fs/reiserfs/reiserfs.h int get_left_neighbor_position(struct tree_balance *tb, int h); h 3254 fs/reiserfs/reiserfs.h int get_right_neighbor_position(struct tree_balance *tb, int h); h 92 fs/ubifs/log.c long long h, t; h 94 fs/ubifs/log.c h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs; h 97 fs/ubifs/log.c if (h > t) h 98 fs/ubifs/log.c return c->log_bytes - h + t; h 99 fs/ubifs/log.c else if (h != t) h 100 fs/ubifs/log.c return t - h; h 1458 fs/ubifs/lpt.c int err, h, iip, shft; h 1469 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) { h 1605 fs/ubifs/lpt.c int err, i, h, iip, shft; h 1620 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) { h 2081 fs/ubifs/lpt.c int err = 0, i, h, iip, shft; h 2113 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) { h 2116 fs/ubifs/lpt.c nnode = scan_get_nnode(c, path + h, nnode, iip); h 2123 fs/ubifs/lpt.c pnode = scan_get_pnode(c, path + h, nnode, iip); h 2135 fs/ubifs/lpt.c ret = scan_cb(c, lprops, path[h].in_tree, data); h 2142 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) { h 2146 fs/ubifs/lpt.c if (path[h].in_tree) h 2148 fs/ubifs/lpt.c nnode = kmemdup(&path[h].nnode, sz, GFP_NOFS); h 2155 fs/ubifs/lpt.c path[h].ptr.nnode = nnode; h 2156 fs/ubifs/lpt.c path[h].in_tree = 1; h 2157 fs/ubifs/lpt.c path[h + 1].cnode.parent = nnode; h 2159 fs/ubifs/lpt.c if (path[h].in_tree) h 2165 fs/ubifs/lpt.c pnode = kmemdup(&path[h].pnode, sz, GFP_NOFS); h 2172 fs/ubifs/lpt.c path[h].ptr.pnode = pnode; h 2173 fs/ubifs/lpt.c path[h].in_tree = 1; h 2211 fs/ubifs/lpt.c h -= 1; h 2212 fs/ubifs/lpt.c ubifs_assert(c, h >= 0); h 2213 fs/ubifs/lpt.c nnode = path[h].ptr.nnode; h 2221 fs/ubifs/lpt.c h += 1; h 2222 fs/ubifs/lpt.c for (; h < c->lpt_hght; h++) { h 2223 fs/ubifs/lpt.c nnode = scan_get_nnode(c, path + h, nnode, iip); h 2230 fs/ubifs/lpt.c pnode = scan_get_pnode(c, path + h, nnode, iip); h 1342 fs/ubifs/lpt_commit.c int h, i, found; h 1348 fs/ubifs/lpt_commit.c for (h = 1; h < c->lpt_hght; h++) { h 1354 fs/ubifs/lpt_commit.c *hght = h; h 1377 fs/ubifs/lpt_commit.c int iip, h, i, found; h 1395 fs/ubifs/lpt_commit.c for (h = *hght + 1; h < c->lpt_hght; h++) { h 1401 fs/ubifs/lpt_commit.c *hght = h; h 2668 fs/unicode/mkutf8data.c unsigned char *h; h 2677 fs/unicode/mkutf8data.c h = hangul; h 2678 fs/unicode/mkutf8data.c LEAF_GEN(h) = 2; h 2679 fs/unicode/mkutf8data.c LEAF_CCC(h) = DECOMPOSE; h 2680 fs/unicode/mkutf8data.c h += 2; h 
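
The fs/ubifs/log.c lines above are plain circular-log arithmetic: h and t are the head and tail linearized to byte offsets, and the free space is whatever lies between them modulo the log size. A self-contained sketch of the same computation, with my own names (UBIFS resolves the ambiguous head == tail case separately; here it is modeled by an 'empty' flag):

#include <assert.h>

/*
 * Free bytes in a circular log of total size 'size', given byte
 * offsets 'head' (next write) and 'tail' (oldest valid data).
 * Mirrors the h/t computation in fs/ubifs/log.c above.
 */
static long long log_free(long long head, long long tail,
			  long long size, int empty)
{
	if (head > tail)
		return size - head + tail;   /* wrapped region is in use */
	else if (head != tail)
		return tail - head;          /* linear region is free */
	return empty ? size : 0;             /* head == tail is ambiguous */
}

int main(void)
{
	assert(log_free(10, 4, 64, 0) == 58); /* wrapped: 64 - 10 + 4 */
	assert(log_free(4, 10, 64, 0) == 6);  /* linear: 10 - 4 */
	return 0;
}
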
2683 fs/unicode/mkutf8data.c h += utf8encode((char *)h, li + LB); h 2686 fs/unicode/mkutf8data.c h += utf8encode((char *)h, vi + VB); h 2690 fs/unicode/mkutf8data.c h += utf8encode((char *)h, ti + TB); h 2693 fs/unicode/mkutf8data.c h[0] = '\0'; h 281 fs/unicode/utf8-norm.c unsigned char *h; h 290 fs/unicode/utf8-norm.c h = hangul; h 291 fs/unicode/utf8-norm.c LEAF_GEN(h) = 2; h 292 fs/unicode/utf8-norm.c LEAF_CCC(h) = DECOMPOSE; h 293 fs/unicode/utf8-norm.c h += 2; h 296 fs/unicode/utf8-norm.c h += utf8encode3((char *)h, li + LB); h 299 fs/unicode/utf8-norm.c h += utf8encode3((char *)h, vi + VB); h 303 fs/unicode/utf8-norm.c h += utf8encode3((char *)h, ti + TB); h 306 fs/unicode/utf8-norm.c h[0] = '\0'; h 38 fs/xfs/libxfs/xfs_defer.h struct list_head *h); h 143 fs/xfs/xfs_rtalloc.h # define xfs_rtalloc_query_range(t,l,h,f,p) (ENOSYS) h 21 include/asm-generic/bitops/fls64.h __u32 h = x >> 32; h 22 include/asm-generic/bitops/fls64.h if (h) h 23 include/asm-generic/bitops/fls64.h return fls(h) + 32; h 94 include/asm-generic/hugetlb.h struct hstate *h = hstate_file(file); h 96 include/asm-generic/hugetlb.h if (len & ~huge_page_mask(h)) h 98 include/asm-generic/hugetlb.h if (addr & ~huge_page_mask(h)) h 516 include/asm-generic/tlb.h #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ h 518 include/asm-generic/tlb.h unsigned long _sz = huge_page_size(h); \ h 21 include/crypto/poly1305.h u32 h[5]; /* accumulator, base 2^26 */ h 30 include/crypto/poly1305.h struct poly1305_state h; h 49 include/crypto/poly1305.h memset(state->h, 0, sizeof(state->h)); h 31 include/crypto/streebog.h struct streebog_uint512 h; h 928 include/drm/drm_dp_helper.h #define DP_AUX_HDCP_V_PRIME(h) (0x68014 + h * 4) h 37 include/drm/drm_hdcp.h #define DRM_HDCP_DDC_V_PRIME(h) (0x20 + h * 4) h 247 include/linux/amba/clcd.h #define CHECK(e,l,h) (var->e < l || var->e > h) h 21 include/linux/bits.h #define GENMASK(h, l) \ h 23 include/linux/bits.h (~UL(0) >> (BITS_PER_LONG - 1 - (h)))) h 25 include/linux/bits.h #define GENMASK_ULL(h, l) \ h 27 include/linux/bits.h (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) h 4 include/linux/btree-128.h struct btree_head128 { struct btree_head h; }; h 9 include/linux/btree-128.h btree_init_mempool(&head->h, mempool); h 14 include/linux/btree-128.h return btree_init(&head->h); h 19 include/linux/btree-128.h btree_destroy(&head->h); h 25 include/linux/btree-128.h return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key); h 34 include/linux/btree-128.h val = btree_get_prev(&head->h, &btree_geo128, h 45 include/linux/btree-128.h return btree_insert(&head->h, &btree_geo128, h 53 include/linux/btree-128.h return btree_update(&head->h, &btree_geo128, h 60 include/linux/btree-128.h return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key); h 68 include/linux/btree-128.h val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]); h 81 include/linux/btree-128.h return btree_merge(&target->h, &victim->h, &btree_geo128, gfp); h 94 include/linux/btree-128.h return btree_visitor(&head->h, &btree_geo128, opaque, h 102 include/linux/btree-128.h return btree_grim_visitor(&head->h, &btree_geo128, opaque, h 11 include/linux/btree-type.h struct btree_head h; h 17 include/linux/btree-type.h btree_init_mempool(&head->h, mempool); h 22 include/linux/btree-type.h return btree_init(&head->h); h 27 include/linux/btree-type.h btree_destroy(&head->h); h 34 include/linux/btree-type.h return btree_merge(&target->h, &victim->h, BTREE_TYPE_GEO, gfp); h 41 include/linux/btree-type.h return 
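
GENMASK(h, l) above builds a mask with bits h..l set by ANDing "everything at or above bit l" with "everything at or below bit h". A user-space equivalent for 64-bit values; genmask64 is my name, not the kernel macro:

#include <stdint.h>
#include <assert.h>

/* Bits h..l set, h >= l; same shape as GENMASK_ULL() above. */
static uint64_t genmask64(unsigned h, unsigned l)
{
	return ((~0ULL) << l) & (~0ULL >> (63 - h));
}

int main(void)
{
	assert(genmask64(7, 4) == 0xf0);
	assert(genmask64(63, 0) == ~0ULL);
	return 0;
}
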
btree_lookup(&head->h, BTREE_TYPE_GEO, &_key); h 48 include/linux/btree-type.h return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp); h 55 include/linux/btree-type.h return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val); h 61 include/linux/btree-type.h return btree_remove(&head->h, BTREE_TYPE_GEO, &_key); h 67 include/linux/btree-type.h void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key); h 76 include/linux/btree-type.h void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key); h 84 include/linux/btree-type.h return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); h 90 include/linux/btree-type.h return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, h 97 include/linux/btree-type.h return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val); h 102 include/linux/btree-type.h return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); h 107 include/linux/btree-type.h return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); h 112 include/linux/btree-type.h return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); h 126 include/linux/btree-type.h return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque, h 134 include/linux/btree-type.h return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque, h 205 include/linux/crush/crush.h struct crush_bucket h; h 210 include/linux/crush/crush.h struct crush_bucket h; h 217 include/linux/crush/crush.h struct crush_bucket h; /* note: h.size is _tree_ size, not number of h 224 include/linux/crush/crush.h struct crush_bucket h; h 230 include/linux/crush/crush.h struct crush_bucket h; h 621 include/linux/dma-mapping.h #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) h 622 include/linux/dma-mapping.h #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) h 53 include/linux/drbd_genl_api.h #define GENL_MAGIC_INCLUDE_FILE <linux/drbd_genl.h> h 55 include/linux/hugetlb.h #define for_each_hstate(h) \ h 56 include/linux/hugetlb.h for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++) h 58 include/linux/hugetlb.h struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, h 108 include/linux/hugetlb.h u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, h 362 include/linux/hugetlb.h struct page *alloc_huge_page_node(struct hstate *h, int nid); h 363 include/linux/hugetlb.h struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, h 365 include/linux/hugetlb.h struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, h 367 include/linux/hugetlb.h struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, h 373 include/linux/hugetlb.h int __init __alloc_bootmem_huge_page(struct hstate *h); h 374 include/linux/hugetlb.h int __init alloc_bootmem_huge_page(struct hstate *h); h 407 include/linux/hugetlb.h static inline unsigned long huge_page_size(struct hstate *h) h 409 include/linux/hugetlb.h return (unsigned long)PAGE_SIZE << h->order; h 416 include/linux/hugetlb.h static inline unsigned long huge_page_mask(struct hstate *h) h 418 include/linux/hugetlb.h return h->mask; h 421 include/linux/hugetlb.h static inline unsigned int huge_page_order(struct hstate *h) h 423 include/linux/hugetlb.h return h->order; h 426 include/linux/hugetlb.h static inline unsigned huge_page_shift(struct hstate *h) h 428 include/linux/hugetlb.h return h->order + PAGE_SHIFT; h 431 include/linux/hugetlb.h static inline bool hstate_is_gigantic(struct hstate 
*h) h 433 include/linux/hugetlb.h return huge_page_order(h) >= MAX_ORDER; h 436 include/linux/hugetlb.h static inline unsigned int pages_per_huge_page(struct hstate *h) h 438 include/linux/hugetlb.h return 1 << h->order; h 441 include/linux/hugetlb.h static inline unsigned int blocks_per_huge_page(struct hstate *h) h 443 include/linux/hugetlb.h return huge_page_size(h) / 512; h 467 include/linux/hugetlb.h static inline int hstate_index(struct hstate *h) h 469 include/linux/hugetlb.h return h - hstates; h 489 include/linux/hugetlb.h static inline bool arch_hugetlb_migration_supported(struct hstate *h) h 491 include/linux/hugetlb.h if ((huge_page_shift(h) == PMD_SHIFT) || h 492 include/linux/hugetlb.h (huge_page_shift(h) == PUD_SHIFT) || h 493 include/linux/hugetlb.h (huge_page_shift(h) == PGDIR_SHIFT)) h 500 include/linux/hugetlb.h static inline bool arch_hugetlb_migration_supported(struct hstate *h) h 506 include/linux/hugetlb.h static inline bool hugepage_migration_supported(struct hstate *h) h 508 include/linux/hugetlb.h return arch_hugetlb_migration_supported(h); h 526 include/linux/hugetlb.h static inline bool hugepage_movable_supported(struct hstate *h) h 528 include/linux/hugetlb.h if (!hugepage_migration_supported(h)) h 531 include/linux/hugetlb.h if (hstate_is_gigantic(h)) h 536 include/linux/hugetlb.h static inline spinlock_t *huge_pte_lockptr(struct hstate *h, h 539 include/linux/hugetlb.h if (huge_page_size(h) == PMD_SIZE) h 541 include/linux/hugetlb.h VM_BUG_ON(huge_page_size(h) == PAGE_SIZE); h 603 include/linux/hugetlb.h static inline struct page *alloc_huge_page_node(struct hstate *h, int nid) h 609 include/linux/hugetlb.h alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask) h 614 include/linux/hugetlb.h static inline struct page *alloc_huge_page_vma(struct hstate *h, h 621 include/linux/hugetlb.h static inline int __alloc_bootmem_huge_page(struct hstate *h) h 646 include/linux/hugetlb.h static inline unsigned long huge_page_size(struct hstate *h) h 651 include/linux/hugetlb.h static inline unsigned long huge_page_mask(struct hstate *h) h 666 include/linux/hugetlb.h static inline unsigned int huge_page_order(struct hstate *h) h 671 include/linux/hugetlb.h static inline unsigned int huge_page_shift(struct hstate *h) h 676 include/linux/hugetlb.h static inline bool hstate_is_gigantic(struct hstate *h) h 681 include/linux/hugetlb.h static inline unsigned int pages_per_huge_page(struct hstate *h) h 691 include/linux/hugetlb.h static inline int hstate_index(struct hstate *h) h 712 include/linux/hugetlb.h static inline bool hugepage_migration_supported(struct hstate *h) h 717 include/linux/hugetlb.h static inline bool hugepage_movable_supported(struct hstate *h) h 722 include/linux/hugetlb.h static inline spinlock_t *huge_pte_lockptr(struct hstate *h, h 742 include/linux/hugetlb.h static inline spinlock_t *huge_pte_lock(struct hstate *h, h 747 include/linux/hugetlb.h ptl = huge_pte_lockptr(h, mm, pte); h 32 include/linux/iio/trigger_consumer.h irqreturn_t (*h)(int irq, void *p); h 42 include/linux/iio/trigger_consumer.h *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p), h 11 include/linux/iio/triggered_buffer.h irqreturn_t (*h)(int irq, void *p), h 18 include/linux/iio/triggered_buffer.h irqreturn_t (*h)(int irq, void *p), h 8 include/linux/iio/triggered_event.h irqreturn_t (*h)(int irq, void *p), h 44 include/linux/inet_diag.h void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, h 1111 include/linux/libata.h extern int 
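
All of the hstate helpers above derive from the single field h->order: a huge page spans 1 << order base pages, so its size is PAGE_SIZE << order and its shift is PAGE_SHIFT + order. A quick worked sketch, assuming 4 KiB base pages and the x86-64 2 MiB huge page:

#include <stdio.h>

#define PAGE_SHIFT 12              /* assumption: 4 KiB base pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int order = 9;    /* 2 MiB huge page = 512 base pages */

	printf("size  = %lu\n", PAGE_SIZE << order);           /* 2097152 */
	printf("pages = %u\n", 1U << order);                   /* 512 */
	printf("shift = %u\n", order + PAGE_SHIFT);            /* 21 */
	printf("mask  = %#lx\n", ~((PAGE_SIZE << order) - 1)); /* ...e00000 */
	return 0;
}
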
ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); h 741 include/linux/list.h static inline void INIT_HLIST_NODE(struct hlist_node *h) h 743 include/linux/list.h h->next = NULL; h 744 include/linux/list.h h->pprev = NULL; h 747 include/linux/list.h static inline int hlist_unhashed(const struct hlist_node *h) h 749 include/linux/list.h return !h->pprev; h 752 include/linux/list.h static inline int hlist_empty(const struct hlist_head *h) h 754 include/linux/list.h return !READ_ONCE(h->first); h 782 include/linux/list.h static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) h 784 include/linux/list.h struct hlist_node *first = h->first; h 788 include/linux/list.h WRITE_ONCE(h->first, n); h 789 include/linux/list.h n->pprev = &h->first; h 819 include/linux/list.h static inline bool hlist_fake(struct hlist_node *h) h 821 include/linux/list.h return h->pprev == &h->next; h 829 include/linux/list.h hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) h 831 include/linux/list.h return !n->next && n->pprev == &h->first; h 44 include/linux/list_bl.h static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) h 46 include/linux/list_bl.h h->next = NULL; h 47 include/linux/list_bl.h h->pprev = NULL; h 52 include/linux/list_bl.h static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h) h 54 include/linux/list_bl.h return !h->pprev; h 57 include/linux/list_bl.h static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h) h 60 include/linux/list_bl.h ((unsigned long)h->first & ~LIST_BL_LOCKMASK); h 63 include/linux/list_bl.h static inline void hlist_bl_set_first(struct hlist_bl_head *h, h 67 include/linux/list_bl.h LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != h 69 include/linux/list_bl.h h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); h 72 include/linux/list_bl.h static inline bool hlist_bl_empty(const struct hlist_bl_head *h) h 74 include/linux/list_bl.h return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK); h 78 include/linux/list_bl.h struct hlist_bl_head *h) h 80 include/linux/list_bl.h struct hlist_bl_node *first = hlist_bl_first(h); h 85 include/linux/list_bl.h n->pprev = &h->first; h 86 include/linux/list_bl.h hlist_bl_set_first(h, n); h 59 include/linux/list_nulls.h static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h) h 61 include/linux/list_nulls.h return !h->pprev; h 64 include/linux/list_nulls.h static inline int hlist_nulls_empty(const struct hlist_nulls_head *h) h 66 include/linux/list_nulls.h return is_a_nulls(READ_ONCE(h->first)); h 70 include/linux/list_nulls.h struct hlist_nulls_head *h) h 72 include/linux/list_nulls.h struct hlist_nulls_node *first = h->first; h 75 include/linux/list_nulls.h WRITE_ONCE(n->pprev, &h->first); h 76 include/linux/list_nulls.h h->first = n; h 34 include/linux/platform_data/dma-atmel.h #define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ h 36 include/linux/platform_data/dma-atmel.h #define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ h 37 include/linux/platform_data/dma-atmel.h #define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ h 42 include/linux/platform_data/dma-atmel.h #define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */ h 47 include/linux/platform_data/dma-atmel.h #define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) 
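
hlist_bl above stores a lock bit in bit 0 of the head pointer itself, which is why hlist_bl_first() masks with ~LIST_BL_LOCKMASK and hlist_bl_set_first() ORs the bit back in. A minimal sketch of that pointer-tagging trick; names are illustrative, and it relies on nodes being at least 2-byte aligned so bit 0 is free:

#include <stdint.h>
#include <assert.h>

#define LOCK_BIT 1UL   /* bit 0 of the head pointer doubles as a lock flag */

struct node { struct node *next; };

static struct node *head_ptr(uintptr_t head)
{
	return (struct node *)(head & ~LOCK_BIT);  /* strip the tag */
}

static uintptr_t head_set(struct node *n, uintptr_t old_head)
{
	/* replace the pointer while preserving the lock bit */
	return (uintptr_t)n | (old_head & LOCK_BIT);
}

int main(void)
{
	static struct node a;
	uintptr_t head = LOCK_BIT;          /* empty list, "locked" */

	head = head_set(&a, head);
	assert(head_ptr(head) == &a);
	assert(head & LOCK_BIT);
	return 0;
}
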
/* Channel dst rq (most significant bits) */ h 526 include/linux/rculist.h struct hlist_head *h) h 528 include/linux/rculist.h struct hlist_node *first = h->first; h 531 include/linux/rculist.h n->pprev = &h->first; h 532 include/linux/rculist.h rcu_assign_pointer(hlist_first_rcu(h), n); h 557 include/linux/rculist.h struct hlist_head *h) h 562 include/linux/rculist.h for (i = h->first; i; i = i->next) h 570 include/linux/rculist.h hlist_add_head_rcu(n, h); h 11 include/linux/rculist_bl.h static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h, h 15 include/linux/rculist_bl.h LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != h 17 include/linux/rculist_bl.h rcu_assign_pointer(h->first, h 21 include/linux/rculist_bl.h static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h) h 24 include/linux/rculist_bl.h ((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK); h 100 include/linux/rculist_bl.h struct hlist_bl_head *h) h 105 include/linux/rculist_bl.h first = hlist_bl_first(h); h 110 include/linux/rculist_bl.h n->pprev = &h->first; h 113 include/linux/rculist_bl.h hlist_bl_set_first_rcu(h, n); h 92 include/linux/rculist_nulls.h struct hlist_nulls_head *h) h 94 include/linux/rculist_nulls.h struct hlist_nulls_node *first = h->first; h 97 include/linux/rculist_nulls.h WRITE_ONCE(n->pprev, &h->first); h 98 include/linux/rculist_nulls.h rcu_assign_pointer(hlist_nulls_first_rcu(h), n); h 123 include/linux/rculist_nulls.h struct hlist_nulls_head *h) h 128 include/linux/rculist_nulls.h for (i = h->first; !is_a_nulls(i); i = i->next) h 136 include/linux/rculist_nulls.h hlist_nulls_add_head_rcu(n, h); h 49 include/linux/soc/qcom/apr.h #define APR_HDR_FIELD_VER(h) (h & 0x000F) h 50 include/linux/soc/qcom/apr.h #define APR_HDR_FIELD_SIZE(h) ((h & 0x00F0) >> 4) h 51 include/linux/soc/qcom/apr.h #define APR_HDR_FIELD_SIZE_BYTES(h) (((h & 0x00F0) >> 4) * 4) h 52 include/linux/soc/qcom/apr.h #define APR_HDR_FIELD_MT(h) ((h & 0x0300) >> 8) h 22 include/linux/sock_diag.h int sock_diag_register(const struct sock_diag_handler *h); h 23 include/linux/sock_diag.h void sock_diag_unregister(const struct sock_diag_handler *h); h 53 include/linux/spi/mxs-spi.h #define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070) h 64 include/linux/spi/mxs-spi.h #define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 0x060 : 0x080) h 100 include/linux/spi/mxs-spi.h #define HW_SSP_DATA(h) (ssp_is_old(h) ? 0x070 : 0x090) h 102 include/linux/spi/mxs-spi.h #define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0) h 103 include/linux/spi/mxs-spi.h #define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0) h 104 include/linux/spi/mxs-spi.h #define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0) h 105 include/linux/spi/mxs-spi.h #define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0) h 106 include/linux/spi/mxs-spi.h #define HW_SSP_STATUS(h) (ssp_is_old(h) ? 
0x0c0 : 0x100) h 85 include/linux/sunrpc/cache.h struct cache_head *h); h 177 include/linux/sunrpc/cache.h sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h); h 182 include/linux/sunrpc/cache.h static inline struct cache_head *cache_get(struct cache_head *h) h 184 include/linux/sunrpc/cache.h kref_get(&h->ref); h 185 include/linux/sunrpc/cache.h return h; h 188 include/linux/sunrpc/cache.h static inline struct cache_head *cache_get_rcu(struct cache_head *h) h 190 include/linux/sunrpc/cache.h if (kref_get_unless_zero(&h->ref)) h 191 include/linux/sunrpc/cache.h return h; h 195 include/linux/sunrpc/cache.h static inline void cache_put(struct cache_head *h, struct cache_detail *cd) h 197 include/linux/sunrpc/cache.h if (kref_read(&h->ref) <= 2 && h 198 include/linux/sunrpc/cache.h h->expiry_time < cd->nextcheck) h 199 include/linux/sunrpc/cache.h cd->nextcheck = h->expiry_time; h 200 include/linux/sunrpc/cache.h kref_put(&h->ref, cd->cache_put); h 203 include/linux/sunrpc/cache.h static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) h 205 include/linux/sunrpc/cache.h if (!test_bit(CACHE_VALID, &h->flags)) h 208 include/linux/sunrpc/cache.h return (h->expiry_time < seconds_since_boot()) || h 209 include/linux/sunrpc/cache.h (detail->flush_time >= h->last_refresh); h 213 include/linux/sunrpc/cache.h struct cache_head *h, struct cache_req *rqstp); h 111 include/linux/time.h #define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l)) h 151 include/linux/vmw_vmci_defs.h static inline bool vmci_handle_is_invalid(struct vmci_handle h) h 153 include/linux/vmw_vmci_defs.h return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE); h 235 include/media/tpg/v4l2-tpg.h void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h); h 519 include/media/tpg/v4l2-tpg.h static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h) h 521 include/media/tpg/v4l2-tpg.h tpg->buf_height = h; h 2299 include/net/bluetooth/hci.h #define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12))) h 2300 include/net/bluetooth/hci.h #define hci_handle(h) (h & 0x0fff) h 2301 include/net/bluetooth/hci.h #define hci_flags(h) (h >> 12) h 712 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash; h 713 include/net/bluetooth/hci_core.h list_add_rcu(&c->list, &h->list); h 716 include/net/bluetooth/hci_core.h h->acl_num++; h 719 include/net/bluetooth/hci_core.h h->amp_num++; h 722 include/net/bluetooth/hci_core.h h->le_num++; h 724 include/net/bluetooth/hci_core.h h->le_num_slave++; h 728 include/net/bluetooth/hci_core.h h->sco_num++; h 735 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash; h 742 include/net/bluetooth/hci_core.h h->acl_num--; h 745 include/net/bluetooth/hci_core.h h->amp_num--; h 748 include/net/bluetooth/hci_core.h h->le_num--; h 750 include/net/bluetooth/hci_core.h h->le_num_slave--; h 754 include/net/bluetooth/hci_core.h h->sco_num--; h 761 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash; h 764 include/net/bluetooth/hci_core.h return h->acl_num; h 766 include/net/bluetooth/hci_core.h return h->amp_num; h 768 include/net/bluetooth/hci_core.h return h->le_num; h 771 include/net/bluetooth/hci_core.h return h->sco_num; h 786 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash; h 792 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { h 807 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash; h 812 
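
time_between32(t, l, h) above is the wraparound-safe range test: rebasing both ends to l reduces "is t within [l, h]" to one unsigned comparison that stays correct even when the 32-bit clock wraps inside the window. A demonstration with a window straddling the wrap point:

#include <stdint.h>
#include <assert.h>

/* true iff t lies in [l, h] on a wrapping 32-bit clock,
 * as in the time_between32() macro above */
static int between32(uint32_t t, uint32_t l, uint32_t h)
{
	return (uint32_t)(h - l) >= (uint32_t)(t - l);
}

int main(void)
{
	/* window [0xfffffff0, 0x10] straddles the 32-bit wrap */
	assert(between32(0xfffffffa, 0xfffffff0, 0x10)); /* before wrap */
	assert(between32(0x00000005, 0xfffffff0, 0x10)); /* after wrap */
	assert(!between32(0x20, 0xfffffff0, 0x10));      /* outside */
	return 0;
}
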
include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { h 826 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash; h 831 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { h 847 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash; h 852 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { h 870 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash; h 875 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { h 889 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash; h 894 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { h 167 include/net/inet_hashtables.h inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash) h 169 include/net/inet_hashtables.h return &h->lhash2[hash & h->lhash2_mask]; h 188 include/net/inet_hashtables.h static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h) h 190 include/net/inet_hashtables.h kfree(h->lhash2); h 191 include/net/inet_hashtables.h h->lhash2 = NULL; h 243 include/net/inet_hashtables.h void inet_hashinfo_init(struct inet_hashinfo *h); h 244 include/net/inet_hashtables.h void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name, h 248 include/net/inet_hashtables.h int inet_hashinfo2_init_mod(struct inet_hashinfo *h); h 115 include/net/netfilter/nf_conntrack_tuple.h #define NF_CT_DIRECTION(h) \ h 116 include/net/netfilter/nf_conntrack_tuple.h ((enum ip_conntrack_dir)(h)->tuple.dst.dir) h 604 include/net/sch_generic.h unsigned int h; h 609 include/net/sch_generic.h h = qdisc_class_hash(id, hash->hashmask); h 610 include/net/sch_generic.h hlist_for_each_entry(cl, &hash->hash[h], hnode) { h 1176 include/net/sock.h } h; h 89 include/scsi/scsi_transport_spi.h #define spi_signalling(h) (((struct spi_host_attrs *)(h)->shost_data)->signalling) h 84 include/trace/define_trace.h # define __TRACE_INCLUDE(system) <trace/events/system.h> h 87 include/trace/define_trace.h # define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h) h 286 include/trace/events/fs_dax.h #include <trace/define_trace.h> h 149 include/trace/events/libata.h #define __parse_subcmd(c,f,h) libata_trace_parse_subcmd(p, c, f, h) h 73 include/trace/events/syscalls.h #include <trace/define_trace.h> h 268 include/trace/events/v4l2.h #include <trace/define_trace.h> h 69 include/trace/events/vb2.h #include <trace/define_trace.h> h 224 include/uapi/drm/exynos_drm.h struct drm_exynos_ipp_limit_val h; h 294 include/uapi/drm/exynos_drm.h __u32 h; h 124 include/uapi/drm/i810_drm.h unsigned int h; h 142 include/uapi/drm/i810_drm.h unsigned int h; h 204 include/uapi/drm/i915_drm.h unsigned int h; h 111 include/uapi/drm/virtgpu_drm.h __u32 h; h 457 include/uapi/drm/vmwgfx_drm.h __u32 h; h 50 include/uapi/linux/dvb/video.h int h; h 184 include/uapi/linux/omapfb.h __u16 h; h 180 include/uapi/linux/pkt_cls.h #define TC_U32_HTID(h) ((h)&0xFFF00000) h 181 include/uapi/linux/pkt_cls.h #define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20) h 182 include/uapi/linux/pkt_cls.h #define TC_U32_HASH(h) (((h)>>12)&0xFF) h 183 include/uapi/linux/pkt_cls.h #define TC_U32_NODE(h) ((h)&0xFFF) h 184 include/uapi/linux/pkt_cls.h #define TC_U32_KEY(h) ((h)&0xFFFFF) h 70 include/uapi/linux/pkt_sched.h #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) h 71 include/uapi/linux/pkt_sched.h #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) h 653 include/uapi/linux/pkt_sched.h __u32 h; h 17 include/uapi/linux/romfs_fs.h 
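
The TC_U32_* macros above slice a u32 classifier handle into a 12-bit hash-table id (bits 31:20), an 8-bit bucket (19:12) and a 12-bit node id (11:0). A round-trip check in plain C; the packed demo value corresponds to what tc would print as 800:12:5:

#include <stdint.h>
#include <assert.h>

/* u32 classifier handle layout, as in the TC_U32_* macros above */
#define TC_U32_HTID(v)     ((v) & 0xFFF00000)
#define TC_U32_USERHTID(v) (TC_U32_HTID(v) >> 20)
#define TC_U32_HASH(v)     (((v) >> 12) & 0xFF)
#define TC_U32_NODE(v)     ((v) & 0xFFF)

int main(void)
{
	uint32_t v = (0x800u << 20) | (0x12u << 12) | 0x005u;

	assert(TC_U32_USERHTID(v) == 0x800);
	assert(TC_U32_HASH(v) == 0x12);
	assert(TC_U32_NODE(v) == 0x005);
	return 0;
}
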
#define __mkw(h,l) (((h)&0x00ff)<< 8|((l)&0x00ff)) h 18 include/uapi/linux/romfs_fs.h #define __mkl(h,l) (((h)&0xffff)<<16|((l)&0xffff)) h 71 include/uapi/linux/swab.h __u32 h = val >> 32; h 73 include/uapi/linux/swab.h return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h))); h 214 include/uapi/linux/virtio_gpu.h __le32 w, h, d; h 701 include/video/omapfb_dss.h u16 x, u16 y, u16 w, u16 h); h 715 include/video/omapfb_dss.h u16 x, u16 y, u16 w, u16 h); h 228 include/video/pm3fb.h #define PM3VideoOverlayHeight_HEIGHT(h) (((h) & 0xfff) << 0) h 655 include/video/pm3fb.h #define PM3FBWriteMode_StripeHeight(h) (((h) & 0x7) << 9) h 685 include/video/pm3fb.h #define PM3LBDestReadMode_StripeHeight(h) (((h) & 0x7) << 5) h 706 include/video/pm3fb.h #define PM3LBSourceReadMode_StripeHeight(h) (((h) & 0x7) << 5) h 726 include/video/pm3fb.h #define PM3LBWriteMode_StripeHeight(h) (((h) & 0x7) << 6) h 974 include/video/pm3fb.h #define PM3Render2D_Height(h) (((h) & 0x0fff) << 16) h 981 include/video/pm3fb.h #define PM3Render2DGlyph_Height(h) (((h) & 0x7f) << 7) h 16 include/video/udlfb.h int w, h; h 60 include/xen/acpi.h static inline int xen_acpi_get_pxm(acpi_handle h) h 65 include/xen/acpi.h acpi_handle phandle = h; h 27 include/xen/interface/hvm/dm_op.h GUEST_HANDLE(void) h; h 129 include/xen/interface/vcpu.h GUEST_HANDLE(vcpu_runstate_info) h; h 216 include/xen/interface/vcpu.h GUEST_HANDLE(vcpu_time_info) h; h 202 ipc/msg.c struct list_head *h = &msq->q_senders; h 204 ipc/msg.c list_for_each_entry_safe(mss, t, h, list) { h 841 ipc/shm.c struct hstate *h = hstate_file(shp->shm_file); h 842 ipc/shm.c *rss_add += pages_per_huge_page(h) * mapping->nrpages; h 287 kernel/audit_watch.c int h = audit_hash_ino((u32)ino); h 298 kernel/audit_watch.c list_add_rcu(&nentry->list, &audit_inode_hash[h]); h 406 kernel/audit_watch.c int h, ret = 0; h 440 kernel/audit_watch.c h = audit_hash_ino((u32)watch->ino); h 441 kernel/audit_watch.c *list = &audit_inode_hash[h]; h 896 kernel/auditfilter.c int h; h 899 kernel/auditfilter.c h = audit_hash_ino(entry->rule.inode_f->val); h 900 kernel/auditfilter.c *p = list = &audit_inode_hash[h]; h 903 kernel/auditfilter.c for (h = 0; h < AUDIT_INODE_BUCKETS; h++) { h 904 kernel/auditfilter.c list = &audit_inode_hash[h]; h 811 kernel/auditsc.c int h = audit_hash_ino((u32)n->ino); h 812 kernel/auditsc.c struct list_head *list = &audit_inode_hash[h]; h 748 kernel/debug/kdb/kdb_support.c struct debug_alloc_header *best, *bestprev, *prev, *h; h 754 kernel/debug/kdb/kdb_support.c h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first); h 756 kernel/debug/kdb/kdb_support.c h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead; h 762 kernel/debug/kdb/kdb_support.c if (h->size >= size && (!best || h->size < best->size)) { h 763 kernel/debug/kdb/kdb_support.c best = h; h 765 kernel/debug/kdb/kdb_support.c if (h->size == size) h 768 kernel/debug/kdb/kdb_support.c if (!h->next) h 770 kernel/debug/kdb/kdb_support.c prev = h; h 771 kernel/debug/kdb/kdb_support.c h = (struct debug_alloc_header *)(debug_alloc_pool + h->next); h 783 kernel/debug/kdb/kdb_support.c h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset); h 784 kernel/debug/kdb/kdb_support.c h->size = rem - dah_overhead; h 785 kernel/debug/kdb/kdb_support.c h->next = best->next; h 805 kernel/debug/kdb/kdb_support.c struct debug_alloc_header *h; h 818 kernel/debug/kdb/kdb_support.c h = (struct debug_alloc_header *)((char *)p - dah_overhead); h 819 kernel/debug/kdb/kdb_support.c memset(p, POISON_FREE, h->size - 
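
The swab.h fragment above composes a 64-bit byte swap from two 32-bit swaps: byte-swap each half, then exchange the halves. An equivalent user-space sketch:

#include <stdint.h>
#include <assert.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) << 8)  |
	       ((x & 0x00ff0000U) >> 8)  |
	       ((x & 0xff000000U) >> 24);
}

/* byte-swap each half, then exchange halves -- as in __swab64() above */
static uint64_t swab64(uint64_t v)
{
	uint32_t h = v >> 32, l = v & 0xffffffffU;

	return ((uint64_t)swab32(l) << 32) | swab32(h);
}

int main(void)
{
	assert(swab64(0x0102030405060708ULL) == 0x0807060504030201ULL);
	return 0;
}
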
1); h 820 kernel/debug/kdb/kdb_support.c *((char *)p + h->size - 1) = POISON_END; h 821 kernel/debug/kdb/kdb_support.c h->caller = NULL; h 822 kernel/debug/kdb/kdb_support.c dah_used -= h->size; h 823 kernel/debug/kdb/kdb_support.c h_offset = (char *)h - debug_alloc_pool; h 825 kernel/debug/kdb/kdb_support.c h->next = dah_first; h 840 kernel/debug/kdb/kdb_support.c prev->size += dah_overhead + h->size; h 841 kernel/debug/kdb/kdb_support.c memset(h, POISON_FREE, dah_overhead - 1); h 842 kernel/debug/kdb/kdb_support.c *((char *)h + dah_overhead - 1) = POISON_END; h 843 kernel/debug/kdb/kdb_support.c h = prev; h 846 kernel/debug/kdb/kdb_support.c h->next = prev->next; h 850 kernel/debug/kdb/kdb_support.c if (h_offset + dah_overhead + h->size == h->next) { h 853 kernel/debug/kdb/kdb_support.c (debug_alloc_pool + h->next); h 854 kernel/debug/kdb/kdb_support.c h->size += dah_overhead + next->size; h 855 kernel/debug/kdb/kdb_support.c h->next = next->next; h 387 kernel/irq/irqdomain.c struct irq_domain *h, *found = NULL; h 401 kernel/irq/irqdomain.c list_for_each_entry(h, &irq_domain_list, link) { h 402 kernel/irq/irqdomain.c if (h->ops->select && fwspec->param_count) h 403 kernel/irq/irqdomain.c rc = h->ops->select(h, fwspec, bus_token); h 404 kernel/irq/irqdomain.c else if (h->ops->match) h 405 kernel/irq/irqdomain.c rc = h->ops->match(h, to_of_node(fwnode), bus_token); h 407 kernel/irq/irqdomain.c rc = ((fwnode != NULL) && (h->fwnode == fwnode) && h 409 kernel/irq/irqdomain.c (h->bus_token == bus_token))); h 412 kernel/irq/irqdomain.c found = h; h 430 kernel/irq/irqdomain.c struct irq_domain *h; h 434 kernel/irq/irqdomain.c list_for_each_entry(h, &irq_domain_list, link) { h 435 kernel/irq/irqdomain.c if (irq_domain_is_msi(h) && h 436 kernel/irq/irqdomain.c !irq_domain_hierarchical_is_msi_remap(h)) { h 890 kernel/locking/lockdep.c static bool in_list(struct list_head *e, struct list_head *h) h 894 kernel/locking/lockdep.c list_for_each(f, h) { h 920 kernel/locking/lockdep.c static bool class_lock_list_valid(struct lock_class *c, struct list_head *h) h 924 kernel/locking/lockdep.c list_for_each_entry(e, h, entry) { h 2385 kernel/rcu/tree.c static void rcu_core_si(struct softirq_action *h) h 9917 kernel/sched/fair.c static __latent_entropy void run_rebalance_domains(struct softirq_action *h) h 254 kernel/softirq.c struct softirq_action *h; h 278 kernel/softirq.c h = softirq_vec; h 284 kernel/softirq.c h += softirq_bit - 1; h 286 kernel/softirq.c vec_nr = h - softirq_vec; h 292 kernel/softirq.c h->action(h); h 296 kernel/softirq.c vec_nr, softirq_to_name[vec_nr], h->action, h 300 kernel/softirq.c h++; h 1586 kernel/time/hrtimer.c static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) h 1782 kernel/time/timer.c static __latent_entropy void run_timer_softirq(struct softirq_action *h) h 2108 kernel/trace/trace_events_filter.c .e = ve, .f = vf, .g = vg, .h = vh }, \ h 12 kernel/trace/trace_events_filter_test.h TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h), h 14 kernel/trace/trace_events_filter_test.h TP_ARGS(a, b, c, d, e, f, g, h), h 24 kernel/trace/trace_events_filter_test.h __field(int, h) h 35 kernel/trace/trace_events_filter_test.h __entry->h = h; h 40 kernel/trace/trace_events_filter_test.h __entry->e, __entry->f, __entry->g, __entry->h) h 849 lib/bch.c struct gf_poly **g, struct gf_poly **h) h 860 lib/bch.c *h = NULL; h 873 lib/bch.c *h = &((struct gf_poly_deg1 *)f)[gcd->deg].poly; h 875 lib/bch.c gf_poly_copy(*h, q); h 48 lib/crypto/sha256.c u32 a, b, c, 
d, e, f, g, h, t1, t2; h 62 lib/crypto/sha256.c e = state[4]; f = state[5]; g = state[6]; h = state[7]; h 65 lib/crypto/sha256.c t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0]; h 66 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; h 68 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; h 70 lib/crypto/sha256.c t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; h 72 lib/crypto/sha256.c t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; h 74 lib/crypto/sha256.c t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; h 75 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5]; h 77 lib/crypto/sha256.c t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6]; h 79 lib/crypto/sha256.c t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7]; h 82 lib/crypto/sha256.c t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8]; h 83 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; h 85 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; h 87 lib/crypto/sha256.c t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; h 89 lib/crypto/sha256.c t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; h 91 lib/crypto/sha256.c t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; h 92 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13]; h 94 lib/crypto/sha256.c t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14]; h 96 lib/crypto/sha256.c t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15]; h 99 lib/crypto/sha256.c t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16]; h 100 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; h 102 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; h 104 lib/crypto/sha256.c t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; h 106 lib/crypto/sha256.c t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; h 108 lib/crypto/sha256.c t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; h 109 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21]; h 111 lib/crypto/sha256.c t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22]; h 113 lib/crypto/sha256.c t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23]; h 116 lib/crypto/sha256.c t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24]; h 117 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; h 119 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; h 121 lib/crypto/sha256.c t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; h 123 lib/crypto/sha256.c t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; h 125 lib/crypto/sha256.c t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; h 126 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29]; h 128 lib/crypto/sha256.c t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30]; h 130 lib/crypto/sha256.c t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31]; h 133 lib/crypto/sha256.c t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32]; h 134 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; h 136 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; h 138 lib/crypto/sha256.c t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; h 140 lib/crypto/sha256.c t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; h 142 lib/crypto/sha256.c t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; h 143 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37]; h 145 lib/crypto/sha256.c t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38]; h 147 lib/crypto/sha256.c t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39]; h 150 lib/crypto/sha256.c t1 = h + e1(e) + Ch(e, f, g) + 
0xa2bfe8a1 + W[40]; h 151 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; h 153 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; h 155 lib/crypto/sha256.c t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; h 157 lib/crypto/sha256.c t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; h 159 lib/crypto/sha256.c t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; h 160 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45]; h 162 lib/crypto/sha256.c t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46]; h 164 lib/crypto/sha256.c t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47]; h 167 lib/crypto/sha256.c t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48]; h 168 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; h 170 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; h 172 lib/crypto/sha256.c t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; h 174 lib/crypto/sha256.c t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; h 176 lib/crypto/sha256.c t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; h 177 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53]; h 179 lib/crypto/sha256.c t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54]; h 181 lib/crypto/sha256.c t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55]; h 184 lib/crypto/sha256.c t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56]; h 185 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; h 187 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; h 189 lib/crypto/sha256.c t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; h 191 lib/crypto/sha256.c t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; h 193 lib/crypto/sha256.c t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; h 194 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61]; h 196 lib/crypto/sha256.c t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62]; h 198 lib/crypto/sha256.c t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63]; h 202 lib/crypto/sha256.c state[4] += e; state[5] += f; state[6] += g; state[7] += h; h 205 lib/crypto/sha256.c a = b = c = d = e = f = g = h = t1 = t2 = 0; h 69 lib/digsig.c const char *h, int hlen) h 169 lib/digsig.c if (!m || len != hlen || memcmp(m, h, hlen)) h 340 lib/inflate.c int h; /* table level */ h 446 lib/inflate.c h = -1; /* no tables yet--level -1 */ h 466 lib/inflate.c h++; h 491 lib/inflate.c if (h) h 500 lib/inflate.c u[h] = ++q; /* table starts after link */ h 504 lib/inflate.c if (h) h 506 lib/inflate.c x[h] = i; /* save pattern for backing up */ h 511 lib/inflate.c u[h-1][j] = r; /* connect to last table */ h 545 lib/inflate.c while ((i & ((1 << w) - 1)) != x[h]) h 547 lib/inflate.c h--; /* don't need to update q */ h 1090 lib/inflate.c unsigned h; /* maximum struct huft's malloc'ed */ h 1099 lib/inflate.c h = 0; h 1108 lib/inflate.c if (hufts > h) h 1109 lib/inflate.c h = hufts; h 1126 lib/inflate.c fprintf(stderr, "<%u> ", h); h 78 lib/irq_poll.c static void __latent_entropy irq_poll_softirq(struct softirq_action *h) h 93 lib/lz4/lz4_compress.c U32 h, h 103 lib/lz4/lz4_compress.c hashTable[h] = p; h 110 lib/lz4/lz4_compress.c hashTable[h] = (U32)(p - srcBase); h 117 lib/lz4/lz4_compress.c hashTable[h] = (U16)(p - srcBase); h 129 lib/lz4/lz4_compress.c U32 const h = LZ4_hashPosition(p, tableType); h 131 lib/lz4/lz4_compress.c LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); h 135 lib/lz4/lz4_compress.c U32 h, h 143 lib/lz4/lz4_compress.c return hashTable[h]; h 149 lib/lz4/lz4_compress.c return 
hashTable[h] + srcBase; h 156 lib/lz4/lz4_compress.c return hashTable[h] + srcBase; h 166 lib/lz4/lz4_compress.c U32 const h = LZ4_hashPosition(p, tableType); h 168 lib/lz4/lz4_compress.c return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); h 256 lib/lz4/lz4_compress.c U32 const h = forwardH; h 265 lib/lz4/lz4_compress.c match = LZ4_getPositionOnHash(h, h 281 lib/lz4/lz4_compress.c LZ4_putPositionOnHash(ip, h, dictPtr->hashTable, h 579 lib/lz4/lz4_compress.c U32 h = forwardH; h 588 lib/lz4/lz4_compress.c match = LZ4_getPositionOnHash(h, ctx->hashTable, h 592 lib/lz4/lz4_compress.c LZ4_putPositionOnHash(ip, h, h 84 lib/lz4/lz4hc_compress.c U32 const h = LZ4HC_hashPtr(base + idx); h 85 lib/lz4/lz4hc_compress.c size_t delta = idx - hashTable[h]; h 92 lib/lz4/lz4hc_compress.c hashTable[h] = idx; h 440 lib/mpi/longlong.h #define rshift_rhlc(r, h, l, c) \ h 443 lib/mpi/longlong.h "=r" (r) : "r" (h), "r" (l), "rn" (c)) h 499 lib/mpi/longlong.h #define rshift_rhlc(r, h, l, c) \ h 504 lib/mpi/longlong.h __nn.__i.__h = (h); __nn.__i.__l = (l); \ h 64 lib/plist.c # define plist_check_head(h) do { } while (0) h 311 lib/test_rhashtable.c struct rhlist_head *h, *pos; h 319 lib/test_rhashtable.c h = rhltable_lookup(&rhlt, &key, test_rht_params); h 320 lib/test_rhashtable.c if (WARN(!h, "key not found during iteration %d of %d", i, entries)) { h 327 lib/test_rhashtable.c rhl_for_each_entry_rcu(obj, pos, h, list_node) { h 337 lib/test_rhashtable.c rhl_for_each_entry_rcu(obj, pos, h, list_node) { h 303 lib/vsprintf.c uint32_t d3, d2, d1, q, h; h 309 lib/vsprintf.c h = (n >> 32); h 310 lib/vsprintf.c d2 = (h ) & 0xffff; h 311 lib/vsprintf.c d3 = (h >> 16); /* implicit "& 0xffff" */ h 142 lib/zlib_deflate/deflate.c #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask) h 418 lib/zlib_deflate/deftree.c int h; /* heap index */ h 432 lib/zlib_deflate/deftree.c for (h = s->heap_max+1; h < HEAP_SIZE; h++) { h 433 lib/zlib_deflate/deftree.c n = s->heap[h]; h 474 lib/zlib_deflate/deftree.c m = s->heap[--h]; h 953 lib/zstd/compress.c static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); } h 954 lib/zstd/compress.c ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */ h 957 lib/zstd/compress.c static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); } h 958 lib/zstd/compress.c static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); } h 961 lib/zstd/compress.c static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); } h 962 lib/zstd/compress.c static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); } h 965 lib/zstd/compress.c static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); } h 966 lib/zstd/compress.c static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); } h 969 lib/zstd/compress.c static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); } h 970 lib/zstd/compress.c static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); } h 973 lib/zstd/compress.c static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); } h 974 lib/zstd/compress.c static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); } h 1037 
lib/zstd/compress.c size_t const h = ZSTD_hashPtr(ip, hBits, mls); h 1039 lib/zstd/compress.c U32 const matchIndex = hashTable[h]; h 1041 lib/zstd/compress.c hashTable[h] = curr; /* update hash table */ h 1137 lib/zstd/compress.c const size_t h = ZSTD_hashPtr(ip, hBits, mls); h 1138 lib/zstd/compress.c const U32 matchIndex = hashTable[h]; h 1146 lib/zstd/compress.c hashTable[h] = curr; /* update hash table */ h 1286 lib/zstd/compress.c size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); h 1289 lib/zstd/compress.c U32 const matchIndexS = hashSmall[h]; h 1292 lib/zstd/compress.c hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ h 1565 lib/zstd/compress.c size_t const h = ZSTD_hashPtr(ip, hashLog, mls); h 1569 lib/zstd/compress.c U32 matchIndex = hashTable[h]; h 1586 lib/zstd/compress.c hashTable[h] = curr; /* Update Hash Table */ h 1648 lib/zstd/compress.c size_t const h = ZSTD_hashPtr(ip, hashLog, mls); h 1652 lib/zstd/compress.c U32 matchIndex = hashTable[h]; h 1668 lib/zstd/compress.c hashTable[h] = curr; /* Update Hash Table */ h 1807 lib/zstd/compress.c size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); h 1808 lib/zstd/compress.c NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; h 1809 lib/zstd/compress.c hashTable[h] = idx; h 241 lib/zstd/zstd_opt.h const size_t h = ZSTD_hashPtr(ip, hashLog, mls); h 243 lib/zstd/zstd_opt.h U32 matchIndex = hashTable[h]; h 293 lib/zstd/zstd_opt.h hashTable[h] = curr; /* Update Hash Table */ h 1418 mm/gup.c struct hstate *h = page_hstate(page); h 1423 mm/gup.c return alloc_migrate_huge_page(h, gfp_mask, nid, NULL); h 75 mm/hugetlb.c static int hugetlb_acct_memory(struct hstate *h, long delta); h 94 mm/hugetlb.c struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, h 106 mm/hugetlb.c spool->hstate = h; h 109 mm/hugetlb.c if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { h 582 mm/hugetlb.c struct hstate *h = hstate_inode(inode); h 584 mm/hugetlb.c hugetlb_acct_memory(h, 1); h 623 mm/hugetlb.c static pgoff_t vma_hugecache_offset(struct hstate *h, h 626 mm/hugetlb.c return ((address - vma->vm_start) >> huge_page_shift(h)) + h 627 mm/hugetlb.c (vma->vm_pgoff >> huge_page_order(h)); h 866 mm/hugetlb.c static void enqueue_huge_page(struct hstate *h, struct page *page) h 869 mm/hugetlb.c list_move(&page->lru, &h->hugepage_freelists[nid]); h 870 mm/hugetlb.c h->free_huge_pages++; h 871 mm/hugetlb.c h->free_huge_pages_node[nid]++; h 874 mm/hugetlb.c static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) h 878 mm/hugetlb.c list_for_each_entry(page, &h->hugepage_freelists[nid], lru) h 885 mm/hugetlb.c if (&h->hugepage_freelists[nid] == &page->lru) h 887 mm/hugetlb.c list_move(&page->lru, &h->hugepage_activelist); h 889 mm/hugetlb.c h->free_huge_pages--; h 890 mm/hugetlb.c h->free_huge_pages_node[nid]--; h 894 mm/hugetlb.c static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, h 920 mm/hugetlb.c page = dequeue_huge_page_node_exact(h, node); h 931 mm/hugetlb.c static inline gfp_t htlb_alloc_mask(struct hstate *h) h 933 mm/hugetlb.c if (hugepage_movable_supported(h)) h 939 mm/hugetlb.c static struct page *dequeue_huge_page_vma(struct hstate *h, h 956 mm/hugetlb.c h->free_huge_pages - h->resv_huge_pages == 0) h 960 mm/hugetlb.c if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) h 963 mm/hugetlb.c gfp_mask = htlb_alloc_mask(h); h 965 mm/hugetlb.c page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); h 968 mm/hugetlb.c h->resv_huge_pages--; h 
1006 mm/hugetlb.c static int hstate_next_node_to_alloc(struct hstate *h, h 1013 mm/hugetlb.c nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); h 1014 mm/hugetlb.c h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); h 1025 mm/hugetlb.c static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) h 1031 mm/hugetlb.c nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); h 1032 mm/hugetlb.c h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); h 1115 mm/hugetlb.c static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, h 1118 mm/hugetlb.c unsigned int order = huge_page_order(h); h 1154 mm/hugetlb.c static void prep_new_huge_page(struct hstate *h, struct page *page, int nid); h 1157 mm/hugetlb.c static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, h 1165 mm/hugetlb.c static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, h 1175 mm/hugetlb.c static void update_and_free_page(struct hstate *h, struct page *page) h 1179 mm/hugetlb.c if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) h 1182 mm/hugetlb.c h->nr_huge_pages--; h 1183 mm/hugetlb.c h->nr_huge_pages_node[page_to_nid(page)]--; h 1184 mm/hugetlb.c for (i = 0; i < pages_per_huge_page(h); i++) { h 1193 mm/hugetlb.c if (hstate_is_gigantic(h)) { h 1194 mm/hugetlb.c destroy_compound_gigantic_page(page, huge_page_order(h)); h 1195 mm/hugetlb.c free_gigantic_page(page, huge_page_order(h)); h 1197 mm/hugetlb.c __free_pages(page, huge_page_order(h)); h 1203 mm/hugetlb.c struct hstate *h; h 1205 mm/hugetlb.c for_each_hstate(h) { h 1206 mm/hugetlb.c if (huge_page_size(h) == size) h 1207 mm/hugetlb.c return h; h 1265 mm/hugetlb.c struct hstate *h = page_hstate(page); h 1300 mm/hugetlb.c hugetlb_cgroup_uncharge_page(hstate_index(h), h 1301 mm/hugetlb.c pages_per_huge_page(h), page); h 1303 mm/hugetlb.c h->resv_huge_pages++; h 1308 mm/hugetlb.c update_and_free_page(h, page); h 1309 mm/hugetlb.c } else if (h->surplus_huge_pages_node[nid]) { h 1312 mm/hugetlb.c update_and_free_page(h, page); h 1313 mm/hugetlb.c h->surplus_huge_pages--; h 1314 mm/hugetlb.c h->surplus_huge_pages_node[nid]--; h 1317 mm/hugetlb.c enqueue_huge_page(h, page); h 1370 mm/hugetlb.c static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) h 1376 mm/hugetlb.c h->nr_huge_pages++; h 1377 mm/hugetlb.c h->nr_huge_pages_node[nid]++; h 1455 mm/hugetlb.c static struct page *alloc_buddy_huge_page(struct hstate *h, h 1459 mm/hugetlb.c int order = huge_page_order(h); h 1506 mm/hugetlb.c static struct page *alloc_fresh_huge_page(struct hstate *h, h 1512 mm/hugetlb.c if (hstate_is_gigantic(h)) h 1513 mm/hugetlb.c page = alloc_gigantic_page(h, gfp_mask, nid, nmask); h 1515 mm/hugetlb.c page = alloc_buddy_huge_page(h, gfp_mask, h 1520 mm/hugetlb.c if (hstate_is_gigantic(h)) h 1521 mm/hugetlb.c prep_compound_gigantic_page(page, huge_page_order(h)); h 1522 mm/hugetlb.c prep_new_huge_page(h, page, page_to_nid(page)); h 1531 mm/hugetlb.c static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, h 1536 mm/hugetlb.c gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; h 1538 mm/hugetlb.c for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { h 1539 mm/hugetlb.c page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed, h 1559 mm/hugetlb.c static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, h 1565 mm/hugetlb.c for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { h 1570 mm/hugetlb.c if 
((!acct_surplus || h->surplus_huge_pages_node[node]) && h 1571 mm/hugetlb.c !list_empty(&h->hugepage_freelists[node])) { h 1573 mm/hugetlb.c list_entry(h->hugepage_freelists[node].next, h 1576 mm/hugetlb.c h->free_huge_pages--; h 1577 mm/hugetlb.c h->free_huge_pages_node[node]--; h 1579 mm/hugetlb.c h->surplus_huge_pages--; h 1580 mm/hugetlb.c h->surplus_huge_pages_node[node]--; h 1582 mm/hugetlb.c update_and_free_page(h, page); h 1617 mm/hugetlb.c struct hstate *h = page_hstate(head); h 1619 mm/hugetlb.c if (h->free_huge_pages - h->resv_huge_pages == 0) h 1630 mm/hugetlb.c h->free_huge_pages--; h 1631 mm/hugetlb.c h->free_huge_pages_node[nid]--; h 1632 mm/hugetlb.c h->max_huge_pages--; h 1633 mm/hugetlb.c update_and_free_page(h, head); h 1671 mm/hugetlb.c static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, h 1676 mm/hugetlb.c if (hstate_is_gigantic(h)) h 1680 mm/hugetlb.c if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) h 1684 mm/hugetlb.c page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); h 1696 mm/hugetlb.c if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { h 1702 mm/hugetlb.c h->surplus_huge_pages++; h 1703 mm/hugetlb.c h->surplus_huge_pages_node[page_to_nid(page)]++; h 1712 mm/hugetlb.c struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, h 1717 mm/hugetlb.c if (hstate_is_gigantic(h)) h 1720 mm/hugetlb.c page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); h 1737 mm/hugetlb.c struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, h 1742 mm/hugetlb.c gfp_t gfp_mask = htlb_alloc_mask(h); h 1747 mm/hugetlb.c page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); h 1754 mm/hugetlb.c struct page *alloc_huge_page_node(struct hstate *h, int nid) h 1756 mm/hugetlb.c gfp_t gfp_mask = htlb_alloc_mask(h); h 1763 mm/hugetlb.c if (h->free_huge_pages - h->resv_huge_pages > 0) h 1764 mm/hugetlb.c page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL); h 1768 mm/hugetlb.c page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL); h 1774 mm/hugetlb.c struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, h 1777 mm/hugetlb.c gfp_t gfp_mask = htlb_alloc_mask(h); h 1780 mm/hugetlb.c if (h->free_huge_pages - h->resv_huge_pages > 0) { h 1783 mm/hugetlb.c page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); h 1791 mm/hugetlb.c return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); h 1795 mm/hugetlb.c struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, h 1804 mm/hugetlb.c gfp_mask = htlb_alloc_mask(h); h 1806 mm/hugetlb.c page = alloc_huge_page_nodemask(h, node, nodemask); h 1816 mm/hugetlb.c static int gather_surplus_pages(struct hstate *h, int delta) h 1824 mm/hugetlb.c needed = (h->resv_huge_pages + delta) - h->free_huge_pages; h 1826 mm/hugetlb.c h->resv_huge_pages += delta; h 1837 mm/hugetlb.c page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), h 1853 mm/hugetlb.c needed = (h->resv_huge_pages + delta) - h 1854 mm/hugetlb.c (h->free_huge_pages + allocated); h 1874 mm/hugetlb.c h->resv_huge_pages += delta; h 1887 mm/hugetlb.c enqueue_huge_page(h, page); h 1914 mm/hugetlb.c static void return_unused_surplus_pages(struct hstate *h, h 1920 mm/hugetlb.c if (hstate_is_gigantic(h)) h 1927 mm/hugetlb.c nr_pages = min(unused_resv_pages, h->surplus_huge_pages); h 1942 mm/hugetlb.c h->resv_huge_pages--; h 1944 mm/hugetlb.c if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) h 1951 mm/hugetlb.c h->resv_huge_pages -= unused_resv_pages; h 1985 
mm/hugetlb.c static long __vma_reservation_common(struct hstate *h, h 1997 mm/hugetlb.c idx = vma_hugecache_offset(h, vma, addr); h 2046 mm/hugetlb.c static long vma_needs_reservation(struct hstate *h, h 2049 mm/hugetlb.c return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); h 2052 mm/hugetlb.c static long vma_commit_reservation(struct hstate *h, h 2055 mm/hugetlb.c return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); h 2058 mm/hugetlb.c static void vma_end_reservation(struct hstate *h, h 2061 mm/hugetlb.c (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); h 2064 mm/hugetlb.c static long vma_add_reservation(struct hstate *h, h 2067 mm/hugetlb.c return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); h 2081 mm/hugetlb.c static void restore_reserve_on_error(struct hstate *h, h 2086 mm/hugetlb.c long rc = vma_needs_reservation(h, vma, address); h 2102 mm/hugetlb.c rc = vma_add_reservation(h, vma, address); h 2110 mm/hugetlb.c vma_end_reservation(h, vma, address); h 2118 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 2125 mm/hugetlb.c idx = hstate_index(h); h 2131 mm/hugetlb.c map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); h 2145 mm/hugetlb.c vma_end_reservation(h, vma, addr); h 2161 mm/hugetlb.c ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); h 2171 mm/hugetlb.c page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); h 2174 mm/hugetlb.c page = alloc_buddy_huge_page_with_mpol(h, vma, addr); h 2179 mm/hugetlb.c h->resv_huge_pages--; h 2182 mm/hugetlb.c list_move(&page->lru, &h->hugepage_activelist); h 2185 mm/hugetlb.c hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); h 2190 mm/hugetlb.c map_commit = vma_commit_reservation(h, vma, addr); h 2204 mm/hugetlb.c hugetlb_acct_memory(h, -rsv_adjust); h 2209 mm/hugetlb.c hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); h 2213 mm/hugetlb.c vma_end_reservation(h, vma, addr); h 2217 mm/hugetlb.c int alloc_bootmem_huge_page(struct hstate *h) h 2219 mm/hugetlb.c int __alloc_bootmem_huge_page(struct hstate *h) h 2224 mm/hugetlb.c for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { h 2228 mm/hugetlb.c huge_page_size(h), huge_page_size(h), h 2243 mm/hugetlb.c BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); h 2247 mm/hugetlb.c m->hstate = h; h 2267 mm/hugetlb.c struct hstate *h = m->hstate; h 2270 mm/hugetlb.c prep_compound_huge_page(page, h->order); h 2272 mm/hugetlb.c prep_new_huge_page(h, page, page_to_nid(page)); h 2281 mm/hugetlb.c if (hstate_is_gigantic(h)) h 2282 mm/hugetlb.c adjust_managed_page_count(page, 1 << h->order); h 2287 mm/hugetlb.c static void __init hugetlb_hstate_alloc_pages(struct hstate *h) h 2292 mm/hugetlb.c if (!hstate_is_gigantic(h)) { h 2310 mm/hugetlb.c for (i = 0; i < h->max_huge_pages; ++i) { h 2311 mm/hugetlb.c if (hstate_is_gigantic(h)) { h 2312 mm/hugetlb.c if (!alloc_bootmem_huge_page(h)) h 2314 mm/hugetlb.c } else if (!alloc_pool_huge_page(h, h 2320 mm/hugetlb.c if (i < h->max_huge_pages) { h 2323 mm/hugetlb.c string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); h 2325 mm/hugetlb.c h->max_huge_pages, buf, i); h 2326 mm/hugetlb.c h->max_huge_pages = i; h 2334 mm/hugetlb.c struct hstate *h; h 2336 mm/hugetlb.c for_each_hstate(h) { h 2337 mm/hugetlb.c if (minimum_order > huge_page_order(h)) h 2338 mm/hugetlb.c minimum_order = huge_page_order(h); h 2341 mm/hugetlb.c if (!hstate_is_gigantic(h)) h 2342 mm/hugetlb.c hugetlb_hstate_alloc_pages(h); h 2349 mm/hugetlb.c struct 
hstate *h; h 2351 mm/hugetlb.c for_each_hstate(h) { h 2354 mm/hugetlb.c string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); h 2356 mm/hugetlb.c buf, h->free_huge_pages); h 2361 mm/hugetlb.c static void try_to_free_low(struct hstate *h, unsigned long count, h 2366 mm/hugetlb.c if (hstate_is_gigantic(h)) h 2371 mm/hugetlb.c struct list_head *freel = &h->hugepage_freelists[i]; h 2373 mm/hugetlb.c if (count >= h->nr_huge_pages) h 2378 mm/hugetlb.c update_and_free_page(h, page); h 2379 mm/hugetlb.c h->free_huge_pages--; h 2380 mm/hugetlb.c h->free_huge_pages_node[page_to_nid(page)]--; h 2385 mm/hugetlb.c static inline void try_to_free_low(struct hstate *h, unsigned long count, h 2396 mm/hugetlb.c static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, h 2404 mm/hugetlb.c for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { h 2405 mm/hugetlb.c if (h->surplus_huge_pages_node[node]) h 2409 mm/hugetlb.c for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { h 2410 mm/hugetlb.c if (h->surplus_huge_pages_node[node] < h 2411 mm/hugetlb.c h->nr_huge_pages_node[node]) h 2418 mm/hugetlb.c h->surplus_huge_pages += delta; h 2419 mm/hugetlb.c h->surplus_huge_pages_node[node] += delta; h 2423 mm/hugetlb.c #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) h 2424 mm/hugetlb.c static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, h 2451 mm/hugetlb.c count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; h 2469 mm/hugetlb.c if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { h 2470 mm/hugetlb.c if (count > persistent_huge_pages(h)) { h 2489 mm/hugetlb.c while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { h 2490 mm/hugetlb.c if (!adjust_pool_surplus(h, nodes_allowed, -1)) h 2494 mm/hugetlb.c while (count > persistent_huge_pages(h)) { h 2505 mm/hugetlb.c ret = alloc_pool_huge_page(h, nodes_allowed, h 2531 mm/hugetlb.c min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; h 2533 mm/hugetlb.c try_to_free_low(h, min_count, nodes_allowed); h 2534 mm/hugetlb.c while (min_count < persistent_huge_pages(h)) { h 2535 mm/hugetlb.c if (!free_pool_huge_page(h, nodes_allowed, 0)) h 2539 mm/hugetlb.c while (count < persistent_huge_pages(h)) { h 2540 mm/hugetlb.c if (!adjust_pool_surplus(h, nodes_allowed, 1)) h 2544 mm/hugetlb.c h->max_huge_pages = persistent_huge_pages(h); h 2581 mm/hugetlb.c struct hstate *h; h 2585 mm/hugetlb.c h = kobj_to_hstate(kobj, &nid); h 2587 mm/hugetlb.c nr_huge_pages = h->nr_huge_pages; h 2589 mm/hugetlb.c nr_huge_pages = h->nr_huge_pages_node[nid]; h 2595 mm/hugetlb.c struct hstate *h, int nid, h 2601 mm/hugetlb.c if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) h 2622 mm/hugetlb.c err = set_max_huge_pages(h, count, nid, n_mask); h 2631 mm/hugetlb.c struct hstate *h; h 2640 mm/hugetlb.c h = kobj_to_hstate(kobj, &nid); h 2641 mm/hugetlb.c return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); h 2681 mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj, NULL); h 2682 mm/hugetlb.c return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); h 2690 mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj, NULL); h 2692 mm/hugetlb.c if (hstate_is_gigantic(h)) h 2700 mm/hugetlb.c h->nr_overcommit_huge_pages = input; h 2710 mm/hugetlb.c struct hstate *h; h 2714 mm/hugetlb.c h = kobj_to_hstate(kobj, &nid); h 2716 mm/hugetlb.c free_huge_pages = h->free_huge_pages; h 2718 mm/hugetlb.c free_huge_pages = h->free_huge_pages_node[nid]; h 2727 
mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj, NULL); h 2728 mm/hugetlb.c return sprintf(buf, "%lu\n", h->resv_huge_pages); h 2735 mm/hugetlb.c struct hstate *h; h 2739 mm/hugetlb.c h = kobj_to_hstate(kobj, &nid); h 2741 mm/hugetlb.c surplus_huge_pages = h->surplus_huge_pages; h 2743 mm/hugetlb.c surplus_huge_pages = h->surplus_huge_pages_node[nid]; h 2765 mm/hugetlb.c static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, h 2770 mm/hugetlb.c int hi = hstate_index(h); h 2772 mm/hugetlb.c hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); h 2785 mm/hugetlb.c struct hstate *h; h 2792 mm/hugetlb.c for_each_hstate(h) { h 2793 mm/hugetlb.c err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, h 2796 mm/hugetlb.c pr_err("Hugetlb: Unable to add hstate %s", h->name); h 2858 mm/hugetlb.c struct hstate *h; h 2864 mm/hugetlb.c for_each_hstate(h) { h 2865 mm/hugetlb.c int idx = hstate_index(h); h 2883 mm/hugetlb.c struct hstate *h; h 2895 mm/hugetlb.c for_each_hstate(h) { h 2896 mm/hugetlb.c err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, h 2901 mm/hugetlb.c h->name, node->dev.id); h 2999 mm/hugetlb.c struct hstate *h; h 3008 mm/hugetlb.c h = &hstates[hugetlb_max_hstate++]; h 3009 mm/hugetlb.c h->order = order; h 3010 mm/hugetlb.c h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); h 3011 mm/hugetlb.c h->nr_huge_pages = 0; h 3012 mm/hugetlb.c h->free_huge_pages = 0; h 3014 mm/hugetlb.c INIT_LIST_HEAD(&h->hugepage_freelists[i]); h 3015 mm/hugetlb.c INIT_LIST_HEAD(&h->hugepage_activelist); h 3016 mm/hugetlb.c h->next_nid_to_alloc = first_memory_node; h 3017 mm/hugetlb.c h->next_nid_to_free = first_memory_node; h 3018 mm/hugetlb.c snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", h 3019 mm/hugetlb.c huge_page_size(h)/1024); h 3021 mm/hugetlb.c parsed_hstate = h; h 3089 mm/hugetlb.c struct hstate *h = &default_hstate; h 3090 mm/hugetlb.c unsigned long tmp = h->max_huge_pages; h 3103 mm/hugetlb.c ret = __nr_hugepages_store_common(obey_mempolicy, h, h 3130 mm/hugetlb.c struct hstate *h = &default_hstate; h 3137 mm/hugetlb.c tmp = h->nr_overcommit_huge_pages; h 3139 mm/hugetlb.c if (write && hstate_is_gigantic(h)) h 3150 mm/hugetlb.c h->nr_overcommit_huge_pages = tmp; h 3161 mm/hugetlb.c struct hstate *h; h 3167 mm/hugetlb.c for_each_hstate(h) { h 3168 mm/hugetlb.c unsigned long count = h->nr_huge_pages; h 3170 mm/hugetlb.c total += (PAGE_SIZE << huge_page_order(h)) * count; h 3172 mm/hugetlb.c if (h == &default_hstate) h 3180 mm/hugetlb.c h->free_huge_pages, h 3181 mm/hugetlb.c h->resv_huge_pages, h 3182 mm/hugetlb.c h->surplus_huge_pages, h 3183 mm/hugetlb.c (PAGE_SIZE << huge_page_order(h)) / 1024); h 3191 mm/hugetlb.c struct hstate *h = &default_hstate; h 3198 mm/hugetlb.c nid, h->nr_huge_pages_node[nid], h 3199 mm/hugetlb.c nid, h->free_huge_pages_node[nid], h 3200 mm/hugetlb.c nid, h->surplus_huge_pages_node[nid]); h 3205 mm/hugetlb.c struct hstate *h; h 3212 mm/hugetlb.c for_each_hstate(h) h 3215 mm/hugetlb.c h->nr_huge_pages_node[nid], h 3216 mm/hugetlb.c h->free_huge_pages_node[nid], h 3217 mm/hugetlb.c h->surplus_huge_pages_node[nid], h 3218 mm/hugetlb.c 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); h 3230 mm/hugetlb.c struct hstate *h; h 3233 mm/hugetlb.c for_each_hstate(h) h 3234 mm/hugetlb.c nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); h 3238 mm/hugetlb.c static int hugetlb_acct_memory(struct hstate *h, long delta) h 3261 mm/hugetlb.c if (gather_surplus_pages(h, delta) < 0) h 3264 mm/hugetlb.c if (delta > 
cpuset_mems_nr(h->free_huge_pages_node)) { h 3265 mm/hugetlb.c return_unused_surplus_pages(h, delta); h 3272 mm/hugetlb.c return_unused_surplus_pages(h, (unsigned long) -delta); h 3297 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 3306 mm/hugetlb.c start = vma_hugecache_offset(h, vma, vma->vm_start); h 3307 mm/hugetlb.c end = vma_hugecache_offset(h, vma, vma->vm_end); h 3319 mm/hugetlb.c hugetlb_acct_memory(h, -gbl_reserve); h 3426 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 3427 mm/hugetlb.c unsigned long sz = huge_page_size(h); h 3464 mm/hugetlb.c dst_ptl = huge_pte_lock(h, dst, dst_pte); h 3465 mm/hugetlb.c src_ptl = huge_pte_lockptr(h, src, src_pte); h 3507 mm/hugetlb.c hugetlb_count_add(pages_per_huge_page(h), dst); h 3529 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 3530 mm/hugetlb.c unsigned long sz = huge_page_size(h); h 3534 mm/hugetlb.c BUG_ON(start & ~huge_page_mask(h)); h 3535 mm/hugetlb.c BUG_ON(end & ~huge_page_mask(h)); h 3557 mm/hugetlb.c ptl = huge_pte_lock(h, mm, ptep); h 3603 mm/hugetlb.c tlb_remove_huge_tlb_entry(h, tlb, ptep, address); h 3607 mm/hugetlb.c hugetlb_count_sub(pages_per_huge_page(h), mm); h 3611 mm/hugetlb.c tlb_remove_page_size(tlb, page, huge_page_size(h)); h 3674 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 3683 mm/hugetlb.c address = address & huge_page_mask(h); h 3716 mm/hugetlb.c address + huge_page_size(h), page); h 3732 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 3736 mm/hugetlb.c unsigned long haddr = address & huge_page_mask(h); h 3787 mm/hugetlb.c ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); h 3812 mm/hugetlb.c pages_per_huge_page(h)); h 3816 mm/hugetlb.c haddr + huge_page_size(h)); h 3824 mm/hugetlb.c ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); h 3842 mm/hugetlb.c restore_reserve_on_error(h, vma, haddr, new_page); h 3852 mm/hugetlb.c static struct page *hugetlbfs_pagecache_page(struct hstate *h, h 3859 mm/hugetlb.c idx = vma_hugecache_offset(h, vma, address); h 3868 mm/hugetlb.c static bool hugetlbfs_pagecache_present(struct hstate *h, h 3876 mm/hugetlb.c idx = vma_hugecache_offset(h, vma, address); h 3888 mm/hugetlb.c struct hstate *h = hstate_inode(inode); h 3902 mm/hugetlb.c inode->i_blocks += blocks_per_huge_page(h); h 3912 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 3919 mm/hugetlb.c unsigned long haddr = address & huge_page_mask(h); h 3940 mm/hugetlb.c size = i_size_read(mapping->host) >> huge_page_shift(h); h 3967 mm/hugetlb.c hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); h 3988 mm/hugetlb.c ptl = huge_pte_lock(h, mm, ptep); h 3998 mm/hugetlb.c clear_huge_page(page, address, pages_per_huge_page(h)); h 4026 mm/hugetlb.c VM_FAULT_SET_HINDEX(hstate_index(h)); h 4038 mm/hugetlb.c if (vma_needs_reservation(h, vma, haddr) < 0) { h 4043 mm/hugetlb.c vma_end_reservation(h, vma, haddr); h 4046 mm/hugetlb.c ptl = huge_pte_lock(h, mm, ptep); h 4047 mm/hugetlb.c size = i_size_read(mapping->host) >> huge_page_shift(h); h 4064 mm/hugetlb.c hugetlb_count_add(pages_per_huge_page(h), mm); h 4088 mm/hugetlb.c restore_reserve_on_error(h, vma, haddr, page); h 4094 mm/hugetlb.c u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, h 4112 mm/hugetlb.c u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, h 4129 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 4132 mm/hugetlb.c unsigned long haddr = address & huge_page_mask(h); h 4134 mm/hugetlb.c ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); h 4142 mm/hugetlb.c 
VM_FAULT_SET_HINDEX(hstate_index(h)); h 4144 mm/hugetlb.c ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); h 4150 mm/hugetlb.c idx = vma_hugecache_offset(h, vma, haddr); h 4157 mm/hugetlb.c hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); h 4187 mm/hugetlb.c if (vma_needs_reservation(h, vma, haddr) < 0) { h 4192 mm/hugetlb.c vma_end_reservation(h, vma, haddr); h 4195 mm/hugetlb.c pagecache_page = hugetlbfs_pagecache_page(h, h 4199 mm/hugetlb.c ptl = huge_pte_lock(h, mm, ptep); h 4271 mm/hugetlb.c struct hstate *h = hstate_vma(dst_vma); h 4285 mm/hugetlb.c pages_per_huge_page(h), false); h 4307 mm/hugetlb.c idx = vma_hugecache_offset(h, dst_vma, dst_addr); h 4313 mm/hugetlb.c size = i_size_read(mapping->host) >> huge_page_shift(h); h 4329 mm/hugetlb.c ptl = huge_pte_lockptr(h, dst_mm, dst_pte); h 4341 mm/hugetlb.c size = i_size_read(mapping->host) >> huge_page_shift(h); h 4366 mm/hugetlb.c hugetlb_count_add(pages_per_huge_page(h), dst_mm); h 4395 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 4420 mm/hugetlb.c pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), h 4421 mm/hugetlb.c huge_page_size(h)); h 4423 mm/hugetlb.c ptl = huge_pte_lock(h, mm, pte); h 4434 mm/hugetlb.c !hugetlbfs_pagecache_present(h, vma, vaddr)) { h 4496 mm/hugetlb.c pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; h 4525 mm/hugetlb.c pfn_offset < pages_per_huge_page(h)) { h 4560 mm/hugetlb.c struct hstate *h = hstate_vma(vma); h 4579 mm/hugetlb.c for (; address < end; address += huge_page_size(h)) { h 4581 mm/hugetlb.c ptep = huge_pte_offset(mm, address, huge_page_size(h)); h 4584 mm/hugetlb.c ptl = huge_pte_lock(h, mm, ptep); h 4605 mm/hugetlb.c newpte, huge_page_size(h)); h 4642 mm/hugetlb.c return pages << h->order; h 4651 mm/hugetlb.c struct hstate *h = hstate_inode(inode); h 4717 mm/hugetlb.c ret = hugetlb_acct_memory(h, gbl_reserve); h 4750 mm/hugetlb.c hugetlb_acct_memory(h, -rsv_adjust); h 4767 mm/hugetlb.c struct hstate *h = hstate_inode(inode); h 4789 mm/hugetlb.c inode->i_blocks -= (blocks_per_huge_page(h) * freed); h 4797 mm/hugetlb.c hugetlb_acct_memory(h, -gbl_reserve); h 5154 mm/hugetlb.c struct hstate *h = page_hstate(oldpage); h 5177 mm/hugetlb.c if (h->surplus_huge_pages_node[old_nid]) { h 5178 mm/hugetlb.c h->surplus_huge_pages_node[old_nid]--; h 5179 mm/hugetlb.c h->surplus_huge_pages_node[new_nid]++; h 164 mm/hugetlb_cgroup.c struct hstate *h; h 169 mm/hugetlb_cgroup.c for_each_hstate(h) { h 171 mm/hugetlb_cgroup.c list_for_each_entry(page, &h->hugepage_activelist, lru) h 357 mm/hugetlb_cgroup.c struct hstate *h = &hstates[idx]; h 360 mm/hugetlb_cgroup.c mem_fmt(buf, 32, huge_page_size(h)); h 363 mm/hugetlb_cgroup.c cft = &h->cgroup_files[0]; h 370 mm/hugetlb_cgroup.c cft = &h->cgroup_files[1]; h 376 mm/hugetlb_cgroup.c cft = &h->cgroup_files[2]; h 383 mm/hugetlb_cgroup.c cft = &h->cgroup_files[3]; h 390 mm/hugetlb_cgroup.c cft = &h->cgroup_files[4]; h 394 mm/hugetlb_cgroup.c h->cgroup_files)); h 399 mm/hugetlb_cgroup.c struct hstate *h; h 401 mm/hugetlb_cgroup.c for_each_hstate(h) { h 407 mm/hugetlb_cgroup.c if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER) h 408 mm/hugetlb_cgroup.c __hugetlb_cgroup_file_init(hstate_index(h)); h 419 mm/hugetlb_cgroup.c struct hstate *h = page_hstate(oldhpage); h 431 mm/hugetlb_cgroup.c list_move(&newhpage->lru, &h->hugepage_activelist); h 572 mm/migrate.c struct hstate *h = page_hstate(src); h 573 mm/migrate.c nr_pages = pages_per_huge_page(h); h 175 mm/pagewalk.c static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long 
addr, h 178 mm/pagewalk.c unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h); h 186 mm/pagewalk.c struct hstate *h = hstate_vma(vma); h 188 mm/pagewalk.c unsigned long hmask = huge_page_mask(h); h 189 mm/pagewalk.c unsigned long sz = huge_page_size(h); h 195 mm/pagewalk.c next = hugetlb_entry_end(h, addr, end); h 1734 mm/slub.c static void rcu_free_slab(struct rcu_head *h) h 1736 mm/slub.c struct page *page = container_of(h, struct page, rcu_head); h 3723 mm/slub.c struct page *page, *h; h 3727 mm/slub.c list_for_each_entry_safe(page, h, &n->partial, slab_list) { h 3738 mm/slub.c list_for_each_entry_safe(page, h, &discard, slab_list) h 187 mm/userfaultfd.c struct hstate *h; h 259 mm/userfaultfd.c h = hstate_vma(dst_vma); h 265 mm/userfaultfd.c VM_BUG_ON(dst_addr & ~huge_page_mask(h)); h 272 mm/userfaultfd.c hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr); h 276 mm/userfaultfd.c dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h)); h 303 mm/userfaultfd.c pages_per_huge_page(h), true); h 376 mm/z3fold.c unsigned long h = (unsigned long)zhdr; h 384 mm/z3fold.c return h | (1 << PAGE_HEADLESS); h 388 mm/z3fold.c h += idx; h 390 mm/z3fold.c h |= (zhdr->last_chunks << BUDDY_SHIFT); h 392 mm/z3fold.c slots->slot[idx] = h; h 402 mm/z3fold.c static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h) h 404 mm/z3fold.c unsigned long addr = h; h 407 mm/z3fold.c addr = *(unsigned long *)h; h 198 net/9p/trans_xen.c struct xen_9pfs_header h; h 211 net/9p/trans_xen.c sizeof(h)) { h 220 net/9p/trans_xen.c xen_9pfs_read_packet(&h, ring->data.in, sizeof(h), h 224 net/9p/trans_xen.c req = p9_tag_lookup(priv->client, h.tag); h 226 net/9p/trans_xen.c dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag); h 227 net/9p/trans_xen.c cons += h.size; h 233 net/9p/trans_xen.c memcpy(&req->rc, &h, sizeof(h)); h 238 net/9p/trans_xen.c xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size, h 243 net/9p/trans_xen.c cons += h.size; h 259 net/bluetooth/bnep/core.c struct bnep_ext_hdr *h; h 263 net/bluetooth/bnep/core.c h = (void *) skb->data; h 264 net/bluetooth/bnep/core.c if (!skb_pull(skb, sizeof(*h))) { h 269 net/bluetooth/bnep/core.c BT_DBG("type 0x%x len %d", h->type, h->len); h 271 net/bluetooth/bnep/core.c switch (h->type & BNEP_TYPE_MASK) { h 281 net/bluetooth/bnep/core.c if (!skb_pull(skb, h->len)) { h 285 net/bluetooth/bnep/core.c } while (!err && (h->type & BNEP_EXT_HEADER)); h 1490 net/bluetooth/hci_conn.c struct hci_conn_hash *h = &hdev->conn_hash; h 1495 net/bluetooth/hci_conn.c list_for_each_entry_safe(c, n, &h->list, list) { h 1712 net/bluetooth/hci_conn.c struct hci_conn_hash *h = &hdev->conn_hash; h 1718 net/bluetooth/hci_conn.c list_for_each_entry_rcu(hcon, &h->list, list) { h 3820 net/bluetooth/hci_core.c struct hci_conn_hash *h = &hdev->conn_hash; h 3829 net/bluetooth/hci_core.c list_for_each_entry_rcu(c, &h->list, list) { h 3879 net/bluetooth/hci_core.c struct hci_conn_hash *h = &hdev->conn_hash; h 3887 net/bluetooth/hci_core.c list_for_each_entry_rcu(c, &h->list, list) { h 3901 net/bluetooth/hci_core.c struct hci_conn_hash *h = &hdev->conn_hash; h 3911 net/bluetooth/hci_core.c list_for_each_entry_rcu(conn, &h->list, list) { h 3982 net/bluetooth/hci_core.c struct hci_conn_hash *h = &hdev->conn_hash; h 3990 net/bluetooth/hci_core.c list_for_each_entry_rcu(conn, &h->list, list) { h 121 net/bridge/netfilter/ebtables.c const struct ethhdr *h = eth_hdr(skb); h 128 net/bridge/netfilter/ebtables.c ethproto = h->h_proto; h 153 
net/bridge/netfilter/ebtables.c !ether_addr_equal_masked(h->h_source, e->sourcemac, h 159 net/bridge/netfilter/ebtables.c !ether_addr_equal_masked(h->h_dest, e->destmac, h 50 net/ceph/crush/crush.c kfree(b->h.items); h 58 net/ceph/crush/crush.c kfree(b->h.items); h 64 net/ceph/crush/crush.c kfree(b->h.items); h 73 net/ceph/crush/crush.c kfree(b->h.items); h 80 net/ceph/crush/crush.c kfree(b->h.items); h 138 net/ceph/crush/mapper.c return bucket_perm_choose(&bucket->h, work, x, r); h 147 net/ceph/crush/mapper.c for (i = bucket->h.size-1; i >= 0; i--) { h 148 net/ceph/crush/mapper.c __u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i], h 149 net/ceph/crush/mapper.c r, bucket->h.id); h 153 net/ceph/crush/mapper.c i, x, r, bucket->h.items[i], bucket->item_weights[i], h 159 net/ceph/crush/mapper.c return bucket->h.items[i]; h 163 net/ceph/crush/mapper.c dprintk("bad list sums for bucket %d\n", bucket->h.id); h 164 net/ceph/crush/mapper.c return bucket->h.items[0]; h 171 net/ceph/crush/mapper.c int h = 0; h 173 net/ceph/crush/mapper.c h++; h 176 net/ceph/crush/mapper.c return h; h 181 net/ceph/crush/mapper.c int h = height(x); h 182 net/ceph/crush/mapper.c return x - (1 << (h-1)); h 187 net/ceph/crush/mapper.c int h = height(x); h 188 net/ceph/crush/mapper.c return x + (1 << (h-1)); h 210 net/ceph/crush/mapper.c t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r, h 211 net/ceph/crush/mapper.c bucket->h.id) * (__u64)w; h 222 net/ceph/crush/mapper.c return bucket->h.items[n >> 1]; h 236 net/ceph/crush/mapper.c for (i = 0; i < bucket->h.size; i++) { h 237 net/ceph/crush/mapper.c draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r); h 245 net/ceph/crush/mapper.c return bucket->h.items[high]; h 321 net/ceph/crush/mapper.c return bucket->h.items; h 337 net/ceph/crush/mapper.c for (i = 0; i < bucket->h.size; i++) { h 340 net/ceph/crush/mapper.c u = crush_hash32_3(bucket->h.hash, x, ids[i], r); h 372 net/ceph/crush/mapper.c return bucket->h.items[high]; h 313 net/ceph/mon_client.c struct ceph_mon_subscribe_ack *h = msg->front.iov_base; h 315 net/ceph/mon_client.c if (msg->front.iov_len < sizeof(*h)) h 317 net/ceph/mon_client.c seconds = le32_to_cpu(h->duration); h 704 net/ceph/mon_client.c struct ceph_mon_statfs *h; h 711 net/ceph/mon_client.c req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS, h 726 net/ceph/mon_client.c h = req->request->front.iov_base; h 727 net/ceph/mon_client.c h->monhdr.have_version = 0; h 728 net/ceph/mon_client.c h->monhdr.session_mon = cpu_to_le16(-1); h 729 net/ceph/mon_client.c h->monhdr.session_mon_tid = 0; h 730 net/ceph/mon_client.c h->fsid = monc->monmap->fsid; h 731 net/ceph/mon_client.c h->contains_data_pool = (data_pool != CEPH_NOPOOL); h 732 net/ceph/mon_client.c h->data_pool = cpu_to_le64(data_pool); h 903 net/ceph/mon_client.c struct ceph_mon_command *h; h 922 net/ceph/mon_client.c h = req->request->front.iov_base; h 923 net/ceph/mon_client.c h->monhdr.have_version = 0; h 924 net/ceph/mon_client.c h->monhdr.session_mon = cpu_to_le16(-1); h 925 net/ceph/mon_client.c h->monhdr.session_mon_tid = 0; h 926 net/ceph/mon_client.c h->fsid = monc->monmap->fsid; h 927 net/ceph/mon_client.c h->num_strs = cpu_to_le32(1); h 928 net/ceph/mon_client.c len = sprintf(h->str, "{ \"prefix\": \"osd blacklist\", \ h 932 net/ceph/mon_client.c h->str_len = cpu_to_le32(len); h 59 net/ceph/osdmap.c ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); h 71 net/ceph/osdmap.c b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); h 74 
net/ceph/osdmap.c b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); h 77 net/ceph/osdmap.c ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); h 78 net/ceph/osdmap.c for (j = 0; j < b->h.size; j++) { h 109 net/ceph/osdmap.c b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); h 112 net/ceph/osdmap.c b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); h 115 net/ceph/osdmap.c ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); h 116 net/ceph/osdmap.c for (j = 0; j < b->h.size; j++) { h 130 net/ceph/osdmap.c b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); h 133 net/ceph/osdmap.c ceph_decode_need(p, end, b->h.size * sizeof(u32), bad); h 134 net/ceph/osdmap.c for (j = 0; j < b->h.size; j++) h 4479 net/core/dev.c static __latent_entropy void net_tx_action(struct softirq_action *h) h 6352 net/core/dev.c static __latent_entropy void net_rx_action(struct softirq_action *h) h 789 net/core/neighbour.c u32 h; h 791 net/core/neighbour.c for (h = 0; h <= PNEIGH_HASHMASK; h++) { h 792 net/core/neighbour.c np = &tbl->phash_buckets[h]; h 2551 net/core/neighbour.c int rc, h, s_h = cb->args[1]; h 2562 net/core/neighbour.c for (h = s_h; h < (1 << nht->hash_shift); h++) { h 2563 net/core/neighbour.c if (h > s_h) h 2565 net/core/neighbour.c for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; h 2587 net/core/neighbour.c cb->args[1] = h; h 2598 net/core/neighbour.c int rc, h, s_h = cb->args[3]; h 2607 net/core/neighbour.c for (h = s_h; h <= PNEIGH_HASHMASK; h++) { h 2608 net/core/neighbour.c if (h > s_h) h 2610 net/core/neighbour.c for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { h 2631 net/core/neighbour.c cb->args[3] = h; h 20 net/core/net-procfs.c struct hlist_head *h; h 23 net/core/net-procfs.c h = &net->dev_name_head[get_bucket(*pos)]; h 24 net/core/net-procfs.c hlist_for_each_entry_rcu(dev, h, name_hlist) { h 1933 net/core/rtnetlink.c int h, s_h; h 1991 net/core/rtnetlink.c for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { h 1993 net/core/rtnetlink.c head = &tgt_net->dev_index_head[h]; h 2020 net/core/rtnetlink.c cb->args[0] = h; h 4033 net/core/rtnetlink.c int h, s_h; h 4058 net/core/rtnetlink.c for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { h 4060 net/core/rtnetlink.c head = &net->dev_index_head[h]; h 4116 net/core/rtnetlink.c cb->args[0] = h; h 5092 net/core/rtnetlink.c int h, s_h, err, s_idx, s_idxattr, s_prividx; h 5119 net/core/rtnetlink.c for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { h 5121 net/core/rtnetlink.c head = &net->dev_index_head[h]; h 5148 net/core/rtnetlink.c cb->args[0] = h; h 110 net/dccp/ccids/lib/packet_history.c void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, h 114 net/dccp/ccids/lib/packet_history.c struct tfrc_rx_hist_entry *entry = tfrc_rx_hist_last_rcv(h); h 120 net/dccp/ccids/lib/packet_history.c int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb) h 125 net/dccp/ccids/lib/packet_history.c if (dccp_delta_seqno(tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, seq) <= 0) h 128 net/dccp/ccids/lib/packet_history.c for (i = 1; i <= h->loss_count; i++) h 129 net/dccp/ccids/lib/packet_history.c if (tfrc_rx_hist_entry(h, i)->tfrchrx_seqno == seq) h 135 net/dccp/ccids/lib/packet_history.c static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b) h 137 net/dccp/ccids/lib/packet_history.c const u8 idx_a = tfrc_rx_hist_index(h, a), h 138 net/dccp/ccids/lib/packet_history.c idx_b = tfrc_rx_hist_index(h, b); h 140 net/dccp/ccids/lib/packet_history.c 
swap(h->ring[idx_a], h->ring[idx_b]); h 152 net/dccp/ccids/lib/packet_history.c static void __do_track_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n1) h 154 net/dccp/ccids/lib/packet_history.c u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, h 158 net/dccp/ccids/lib/packet_history.c h->loss_count = 1; h 159 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1); h 163 net/dccp/ccids/lib/packet_history.c static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2) h 165 net/dccp/ccids/lib/packet_history.c u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, h 166 net/dccp/ccids/lib/packet_history.c s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, h 170 net/dccp/ccids/lib/packet_history.c h->loss_count = 2; h 171 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2); h 178 net/dccp/ccids/lib/packet_history.c u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp; h 182 net/dccp/ccids/lib/packet_history.c h->loss_count = 0; h 183 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 1); h 186 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2); h 192 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_swap(h, 0, 3); h 193 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 3); h 194 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n2); h 195 net/dccp/ccids/lib/packet_history.c h->loss_count = 2; h 200 net/dccp/ccids/lib/packet_history.c static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3) h 202 net/dccp/ccids/lib/packet_history.c u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, h 203 net/dccp/ccids/lib/packet_history.c s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, h 204 net/dccp/ccids/lib/packet_history.c s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno, h 208 net/dccp/ccids/lib/packet_history.c h->loss_count = 3; h 209 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3); h 219 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_swap(h, 2, 3); h 220 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3); h 221 net/dccp/ccids/lib/packet_history.c h->loss_count = 3; h 228 net/dccp/ccids/lib/packet_history.c u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp; h 232 net/dccp/ccids/lib/packet_history.c u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp; h 236 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 2); h 237 net/dccp/ccids/lib/packet_history.c h->loss_count = 0; h 240 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 1); h 241 net/dccp/ccids/lib/packet_history.c h->loss_count = 1; h 245 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n3); h 254 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_swap(h, 0, 3); h 255 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 3); h 256 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n3); h 257 net/dccp/ccids/lib/packet_history.c h->loss_count = 3; h 263 net/dccp/ccids/lib/packet_history.c static void __three_after_loss(struct tfrc_rx_hist *h) h 271 net/dccp/ccids/lib/packet_history.c u64 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, h 272 net/dccp/ccids/lib/packet_history.c s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno, h 273 
net/dccp/ccids/lib/packet_history.c s3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_seqno; h 274 net/dccp/ccids/lib/packet_history.c u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp, h 275 net/dccp/ccids/lib/packet_history.c n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp; h 281 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 3); h 282 net/dccp/ccids/lib/packet_history.c h->loss_count = 0; h 285 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 2); h 286 net/dccp/ccids/lib/packet_history.c h->loss_count = 1; h 290 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 1); h 291 net/dccp/ccids/lib/packet_history.c h->loss_count = 2; h 311 net/dccp/ccids/lib/packet_history.c int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, h 318 net/dccp/ccids/lib/packet_history.c if (h->loss_count == 0) { h 319 net/dccp/ccids/lib/packet_history.c __do_track_loss(h, skb, ndp); h 320 net/dccp/ccids/lib/packet_history.c } else if (h->loss_count == 1) { h 321 net/dccp/ccids/lib/packet_history.c __one_after_loss(h, skb, ndp); h 322 net/dccp/ccids/lib/packet_history.c } else if (h->loss_count != 2) { h 323 net/dccp/ccids/lib/packet_history.c DCCP_BUG("invalid loss_count %d", h->loss_count); h 324 net/dccp/ccids/lib/packet_history.c } else if (__two_after_loss(h, skb, ndp)) { h 328 net/dccp/ccids/lib/packet_history.c is_new_loss = tfrc_lh_interval_add(lh, h, calc_first_li, sk); h 329 net/dccp/ccids/lib/packet_history.c __three_after_loss(h); h 334 net/dccp/ccids/lib/packet_history.c int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h) h 339 net/dccp/ccids/lib/packet_history.c h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC); h 340 net/dccp/ccids/lib/packet_history.c if (h->ring[i] == NULL) h 344 net/dccp/ccids/lib/packet_history.c h->loss_count = h->loss_start = 0; h 349 net/dccp/ccids/lib/packet_history.c kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); h 350 net/dccp/ccids/lib/packet_history.c h->ring[i] = NULL; h 355 net/dccp/ccids/lib/packet_history.c void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) h 360 net/dccp/ccids/lib/packet_history.c if (h->ring[i] != NULL) { h 361 net/dccp/ccids/lib/packet_history.c kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); h 362 net/dccp/ccids/lib/packet_history.c h->ring[i] = NULL; h 370 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_last_s(const struct tfrc_rx_hist *h) h 372 net/dccp/ccids/lib/packet_history.c return h->ring[0]; h 379 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h) h 381 net/dccp/ccids/lib/packet_history.c return h->ring[h->rtt_sample_prev]; h 389 net/dccp/ccids/lib/packet_history.c u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb) h 393 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval); h 396 net/dccp/ccids/lib/packet_history.c if (h->rtt_sample_prev == 2) { /* previous candidate stored */ h 397 net/dccp/ccids/lib/packet_history.c sample = SUB16(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval, h 398 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval); h 401 net/dccp/ccids/lib/packet_history.c ktime_us_delta(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_tstamp, h 402 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp); h 411 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval, h 412 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval); h 414 net/dccp/ccids/lib/packet_history.c h->rtt_sample_prev = 1; h 419 
net/dccp/ccids/lib/packet_history.c sample = ktime_to_us(net_timedelta(tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp)); h 421 net/dccp/ccids/lib/packet_history.c h->rtt_sample_prev = 2; h 430 net/dccp/ccids/lib/packet_history.c h->rtt_sample_prev = 0; /* use current entry as next reference */ h 91 net/dccp/ccids/lib/packet_history.h static inline u8 tfrc_rx_hist_index(const struct tfrc_rx_hist *h, const u8 n) h 93 net/dccp/ccids/lib/packet_history.h return (h->loss_start + n) & TFRC_NDUPACK; h 100 net/dccp/ccids/lib/packet_history.h tfrc_rx_hist_last_rcv(const struct tfrc_rx_hist *h) h 102 net/dccp/ccids/lib/packet_history.h return h->ring[tfrc_rx_hist_index(h, h->loss_count)]; h 109 net/dccp/ccids/lib/packet_history.h tfrc_rx_hist_entry(const struct tfrc_rx_hist *h, const u8 n) h 111 net/dccp/ccids/lib/packet_history.h return h->ring[tfrc_rx_hist_index(h, n)]; h 118 net/dccp/ccids/lib/packet_history.h tfrc_rx_hist_loss_prev(const struct tfrc_rx_hist *h) h 120 net/dccp/ccids/lib/packet_history.h return h->ring[h->loss_start]; h 124 net/dccp/ccids/lib/packet_history.h static inline bool tfrc_rx_hist_loss_pending(const struct tfrc_rx_hist *h) h 126 net/dccp/ccids/lib/packet_history.h return h->loss_count > 0; h 129 net/dccp/ccids/lib/packet_history.h void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, const struct sk_buff *skb, h 132 net/dccp/ccids/lib/packet_history.h int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb); h 135 net/dccp/ccids/lib/packet_history.h int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, struct tfrc_loss_hist *lh, h 138 net/dccp/ccids/lib/packet_history.h u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb); h 139 net/dccp/ccids/lib/packet_history.h int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h); h 140 net/dccp/ccids/lib/packet_history.h void tfrc_rx_hist_purge(struct tfrc_rx_hist *h); h 963 net/dccp/ipv4.c .h.hashinfo = &dccp_hashinfo, h 1051 net/dccp/ipv6.c .h.hashinfo = &dccp_hashinfo, h 1724 net/decnet/dn_route.c int h, s_h; h 1740 net/decnet/dn_route.c for(h = 0; h <= dn_rt_hash_mask; h++) { h 1741 net/decnet/dn_route.c if (h < s_h) h 1743 net/decnet/dn_route.c if (h > s_h) h 1746 net/decnet/dn_route.c for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0; h 1765 net/decnet/dn_route.c cb->args[0] = h; h 89 net/decnet/dn_table.c u16 h = le16_to_cpu(key.datum)>>(16 - dz->dz_order); h 90 net/decnet/dn_table.c h ^= (h >> 10); h 91 net/decnet/dn_table.c h ^= (h >> 6); h 92 net/decnet/dn_table.c h &= DZ_HASHMASK(dz); h 93 net/decnet/dn_table.c return *(dn_fib_idx_t *)&h; h 443 net/decnet/dn_table.c int h, s_h; h 446 net/decnet/dn_table.c for(h = 0; h < dz->dz_divisor; h++) { h 447 net/decnet/dn_table.c if (h < s_h) h 449 net/decnet/dn_table.c if (h > s_h) h 451 net/decnet/dn_table.c if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL) h 453 net/decnet/dn_table.c if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) { h 454 net/decnet/dn_table.c cb->args[3] = h; h 458 net/decnet/dn_table.c cb->args[3] = h; h 492 net/decnet/dn_table.c unsigned int h, s_h; h 507 net/decnet/dn_table.c for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) { h 509 net/decnet/dn_table.c hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) { h 524 net/decnet/dn_table.c cb->args[0] = h; h 839 net/decnet/dn_table.c unsigned int h; h 847 net/decnet/dn_table.c h = n & (DN_FIB_TABLE_HASHSZ - 1); h 849 net/decnet/dn_table.c hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) { h 876 net/decnet/dn_table.c hlist_add_head_rcu(&t->hlist, 
&dn_fib_table_hash[h]); h 895 net/decnet/dn_table.c unsigned int h; h 897 net/decnet/dn_table.c for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { h 898 net/decnet/dn_table.c hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) h 918 net/decnet/dn_table.c unsigned int h; h 921 net/decnet/dn_table.c for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { h 922 net/decnet/dn_table.c hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h], h 1813 net/ipv4/devinet.c int h, s_h; h 1848 net/ipv4/devinet.c for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { h 1850 net/ipv4/devinet.c head = &tgt_net->dev_index_head[h]; h 1857 net/ipv4/devinet.c if (h > s_h || idx > s_idx) h 1876 net/ipv4/devinet.c cb->args[0] = h; h 2224 net/ipv4/devinet.c int h, s_h; h 2248 net/ipv4/devinet.c for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { h 2250 net/ipv4/devinet.c head = &net->dev_index_head[h]; h 2277 net/ipv4/devinet.c if (h == NETDEV_HASHENTRIES) { h 2286 net/ipv4/devinet.c h++; h 2288 net/ipv4/devinet.c if (h == NETDEV_HASHENTRIES + 1) { h 2297 net/ipv4/devinet.c h++; h 2300 net/ipv4/devinet.c cb->args[0] = h; h 83 net/ipv4/fib_frontend.c unsigned int h; h 109 net/ipv4/fib_frontend.c h = id & (FIB_TABLE_HASHSZ - 1); h 110 net/ipv4/fib_frontend.c hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]); h 120 net/ipv4/fib_frontend.c unsigned int h; h 124 net/ipv4/fib_frontend.c h = id & (FIB_TABLE_HASHSZ - 1); h 126 net/ipv4/fib_frontend.c head = &net->ipv4.fib_table_hash[h]; h 196 net/ipv4/fib_frontend.c unsigned int h; h 198 net/ipv4/fib_frontend.c for (h = 0; h < FIB_TABLE_HASHSZ; h++) { h 199 net/ipv4/fib_frontend.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; h 980 net/ipv4/fib_frontend.c unsigned int h, s_h; h 1021 net/ipv4/fib_frontend.c for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { h 1023 net/ipv4/fib_frontend.c head = &net->ipv4.fib_table_hash[h]; h 1048 net/ipv4/fib_frontend.c cb->args[0] = h; h 2198 net/ipv4/fib_semantics.c int h = fib_multipath_hash(net, fl4, skb, NULL); h 2200 net/ipv4/fib_semantics.c fib_select_multipath(res, h); h 2007 net/ipv4/fib_trie.c unsigned int h; h 2009 net/ipv4/fib_trie.c for (h = 0; h < FIB_TABLE_HASHSZ; h++) { h 2010 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; h 2059 net/ipv4/fib_trie.c unsigned int h; h 2061 net/ipv4/fib_trie.c for (h = 0; h < FIB_TABLE_HASHSZ; h++) { h 2062 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; h 2451 net/ipv4/fib_trie.c unsigned int h; h 2459 net/ipv4/fib_trie.c for (h = 0; h < FIB_TABLE_HASHSZ; h++) { h 2460 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; h 2490 net/ipv4/fib_trie.c unsigned int h; h 2492 net/ipv4/fib_trie.c for (h = 0; h < FIB_TABLE_HASHSZ; h++) { h 2493 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; h 2525 net/ipv4/fib_trie.c unsigned int h; h 2535 net/ipv4/fib_trie.c h = tb->tb_id & (FIB_TABLE_HASHSZ - 1); h 2544 net/ipv4/fib_trie.c while (++h < FIB_TABLE_HASHSZ) { h 2545 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; h 180 net/ipv4/inet_connection_sock.c struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; h 294 net/ipv4/inet_connection_sock.c struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; h 679 net/ipv4/inet_connection_sock.c struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo; h 1149 net/ipv4/inet_diag.c static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h) h 1154 net/ipv4/inet_diag.c if (nlmsg_len(h) < 
hdrlen) h 1157 net/ipv4/inet_diag.c if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY && h 1158 net/ipv4/inet_diag.c h->nlmsg_flags & NLM_F_DUMP) { h 1159 net/ipv4/inet_diag.c if (nlmsg_attrlen(h, hdrlen)) { h 1163 net/ipv4/inet_diag.c attr = nlmsg_find_attr(h, hdrlen, h 1173 net/ipv4/inet_diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); h 1177 net/ipv4/inet_diag.c return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h)); h 1242 net/ipv4/inet_diag.c int inet_diag_register(const struct inet_diag_handler *h) h 1244 net/ipv4/inet_diag.c const __u16 type = h->idiag_type; h 1253 net/ipv4/inet_diag.c inet_diag_table[type] = h; h 1262 net/ipv4/inet_diag.c void inet_diag_unregister(const struct inet_diag_handler *h) h 1264 net/ipv4/inet_diag.c const __u16 type = h->idiag_type; h 105 net/ipv4/inet_hashtables.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; h 130 net/ipv4/inet_hashtables.c struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; h 175 net/ipv4/inet_hashtables.c inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk) h 189 net/ipv4/inet_hashtables.c return inet_lhash2_bucket(h, hash); h 192 net/ipv4/inet_hashtables.c static void inet_hash2(struct inet_hashinfo *h, struct sock *sk) h 196 net/ipv4/inet_hashtables.c if (!h->lhash2) h 199 net/ipv4/inet_hashtables.c ilb2 = inet_lhash2_bucket_sk(h, sk); h 212 net/ipv4/inet_hashtables.c static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk) h 216 net/ipv4/inet_hashtables.c if (!h->lhash2 || h 220 net/ipv4/inet_hashtables.c ilb2 = inet_lhash2_bucket_sk(h, sk); h 475 net/ipv4/inet_hashtables.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; h 540 net/ipv4/inet_hashtables.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; h 589 net/ipv4/inet_hashtables.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; h 746 net/ipv4/inet_hashtables.c void inet_hashinfo_init(struct inet_hashinfo *h) h 751 net/ipv4/inet_hashtables.c spin_lock_init(&h->listening_hash[i].lock); h 752 net/ipv4/inet_hashtables.c INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head, h 754 net/ipv4/inet_hashtables.c h->listening_hash[i].count = 0; h 757 net/ipv4/inet_hashtables.c h->lhash2 = NULL; h 761 net/ipv4/inet_hashtables.c static void init_hashinfo_lhash2(struct inet_hashinfo *h) h 765 net/ipv4/inet_hashtables.c for (i = 0; i <= h->lhash2_mask; i++) { h 766 net/ipv4/inet_hashtables.c spin_lock_init(&h->lhash2[i].lock); h 767 net/ipv4/inet_hashtables.c INIT_HLIST_HEAD(&h->lhash2[i].head); h 768 net/ipv4/inet_hashtables.c h->lhash2[i].count = 0; h 772 net/ipv4/inet_hashtables.c void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name, h 777 net/ipv4/inet_hashtables.c h->lhash2 = alloc_large_system_hash(name, h 778 net/ipv4/inet_hashtables.c sizeof(*h->lhash2), h 783 net/ipv4/inet_hashtables.c &h->lhash2_mask, h 786 net/ipv4/inet_hashtables.c init_hashinfo_lhash2(h); h 789 net/ipv4/inet_hashtables.c int inet_hashinfo2_init_mod(struct inet_hashinfo *h) h 791 net/ipv4/inet_hashtables.c h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL); h 792 net/ipv4/inet_hashtables.c if (!h->lhash2) h 795 net/ipv4/inet_hashtables.c h->lhash2_mask = INET_LHTABLE_SIZE - 1; h 797 net/ipv4/inet_hashtables.c BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask); h 799 net/ipv4/inet_hashtables.c init_hashinfo_lhash2(h); h 175 net/ipv4/ip_tunnel.c unsigned int h; h 187 net/ipv4/ip_tunnel.c h = ip_tunnel_hash(i_key, remote); h 188 net/ipv4/ip_tunnel.c return &itn->tunnels[h]; h 1077 net/ipv4/ip_tunnel.c 
int h; h 1083 net/ipv4/ip_tunnel.c for (h = 0; h < IP_TNL_HASH_SIZE; h++) { h 1086 net/ipv4/ip_tunnel.c struct hlist_head *thead = &itn->tunnels[h]; h 801 net/ipv4/ipconfig.c struct iphdr *h; h 815 net/ipv4/ipconfig.c h = ip_hdr(skb); h 816 net/ipv4/ipconfig.c h->version = 4; h 817 net/ipv4/ipconfig.c h->ihl = 5; h 818 net/ipv4/ipconfig.c h->tot_len = htons(sizeof(struct bootp_pkt)); h 819 net/ipv4/ipconfig.c h->frag_off = htons(IP_DF); h 820 net/ipv4/ipconfig.c h->ttl = 64; h 821 net/ipv4/ipconfig.c h->protocol = IPPROTO_UDP; h 822 net/ipv4/ipconfig.c h->daddr = htonl(INADDR_BROADCAST); h 823 net/ipv4/ipconfig.c h->check = ip_fast_csum((unsigned char *) h, h->ihl); h 961 net/ipv4/ipconfig.c struct iphdr *h; h 982 net/ipv4/ipconfig.c h = &b->iph; h 984 net/ipv4/ipconfig.c if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) h 988 net/ipv4/ipconfig.c if (ip_is_fragment(h)) { h 993 net/ipv4/ipconfig.c if (skb->len < ntohs(h->tot_len)) h 996 net/ipv4/ipconfig.c if (ip_fast_csum((char *) h, h->ihl)) h 1002 net/ipv4/ipconfig.c if (ntohs(h->tot_len) < ntohs(b->udph.len) + sizeof(struct iphdr)) h 1018 net/ipv4/ipconfig.c h = &b->iph; h 459 net/ipv4/netfilter/arp_tables.c unsigned int h; h 480 net/ipv4/netfilter/arp_tables.c for (h = 0; h < NF_ARP_NUMHOOKS; h++) { h 481 net/ipv4/netfilter/arp_tables.c if (!(valid_hooks & (1 << h))) h 483 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)e - base == hook_entries[h]) h 484 net/ipv4/netfilter/arp_tables.c newinfo->hook_entry[h] = hook_entries[h]; h 485 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)e - base == underflows[h]) { h 489 net/ipv4/netfilter/arp_tables.c newinfo->underflow[h] = underflows[h]; h 1134 net/ipv4/netfilter/arp_tables.c int h; h 1149 net/ipv4/netfilter/arp_tables.c for (h = 0; h < NF_ARP_NUMHOOKS; h++) { h 1150 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)de - base < newinfo->hook_entry[h]) h 1151 net/ipv4/netfilter/arp_tables.c newinfo->hook_entry[h] -= origsize - *size; h 1152 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)de - base < newinfo->underflow[h]) h 1153 net/ipv4/netfilter/arp_tables.c newinfo->underflow[h] -= origsize - *size; h 596 net/ipv4/netfilter/ip_tables.c unsigned int h; h 617 net/ipv4/netfilter/ip_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) { h 618 net/ipv4/netfilter/ip_tables.c if (!(valid_hooks & (1 << h))) h 620 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)e - base == hook_entries[h]) h 621 net/ipv4/netfilter/ip_tables.c newinfo->hook_entry[h] = hook_entries[h]; h 622 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)e - base == underflows[h]) { h 626 net/ipv4/netfilter/ip_tables.c newinfo->underflow[h] = underflows[h]; h 1362 net/ipv4/netfilter/ip_tables.c int h; h 1382 net/ipv4/netfilter/ip_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) { h 1383 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)de - base < newinfo->hook_entry[h]) h 1384 net/ipv4/netfilter/ip_tables.c newinfo->hook_entry[h] -= origsize - *size; h 1385 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)de - base < newinfo->underflow[h]) h 1386 net/ipv4/netfilter/ip_tables.c newinfo->underflow[h] -= origsize - *size; h 95 net/ipv4/raw.c struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; h 98 net/ipv4/raw.c head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; h 100 net/ipv4/raw.c write_lock_bh(&h->lock); h 103 net/ipv4/raw.c write_unlock_bh(&h->lock); h 111 net/ipv4/raw.c struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; h 113 net/ipv4/raw.c write_lock_bh(&h->lock); h 116 
net/ipv4/raw.c write_unlock_bh(&h->lock); h 981 net/ipv4/raw.c .h.raw_hash = &raw_v4_hashinfo, h 994 net/ipv4/raw.c struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file)); h 999 net/ipv4/raw.c sk_for_each(sk, &h->ht[state->bucket]) h 1010 net/ipv4/raw.c struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file)); h 1020 net/ipv4/raw.c sk = sk_head(&h->ht[state->bucket]); h 1038 net/ipv4/raw.c struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file)); h 1040 net/ipv4/raw.c read_lock(&h->lock); h 1060 net/ipv4/raw.c struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file)); h 1062 net/ipv4/raw.c read_unlock(&h->lock); h 2015 net/ipv4/route.c int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys); h 2017 net/ipv4/route.c fib_select_multipath(res, h); h 2610 net/ipv4/tcp_ipv4.c .h.hashinfo = &tcp_hashinfo, h 230 net/ipv4/udp.c struct udp_table *udptable = sk->sk_prot->h.udp_table; h 1887 net/ipv4/udp.c struct udp_table *udptable = sk->sk_prot->h.udp_table; h 1918 net/ipv4/udp.c struct udp_table *udptable = sk->sk_prot->h.udp_table; h 2800 net/ipv4/udp.c .h.udp_table = &udp_table, h 58 net/ipv4/udplite.c .h.udp_table = &udplite_table, h 705 net/ipv6/addrconf.c int h, s_h; h 729 net/ipv6/addrconf.c for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { h 731 net/ipv6/addrconf.c head = &net->dev_index_head[h]; h 758 net/ipv6/addrconf.c if (h == NETDEV_HASHENTRIES) { h 767 net/ipv6/addrconf.c h++; h 769 net/ipv6/addrconf.c if (h == NETDEV_HASHENTRIES + 1) { h 778 net/ipv6/addrconf.c h++; h 781 net/ipv6/addrconf.c cb->args[0] = h; h 3754 net/ipv6/addrconf.c struct hlist_head *h = &inet6_addr_lst[i]; h 3758 net/ipv6/addrconf.c hlist_for_each_entry_rcu(ifa, h, addr_lst) { h 5166 net/ipv6/addrconf.c int h, s_h; h 5202 net/ipv6/addrconf.c for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { h 5204 net/ipv6/addrconf.c head = &tgt_net->dev_index_head[h]; h 5208 net/ipv6/addrconf.c if (h > s_h || idx > s_idx) h 5223 net/ipv6/addrconf.c cb->args[0] = h; h 5849 net/ipv6/addrconf.c int h, s_h; h 5869 net/ipv6/addrconf.c for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { h 5871 net/ipv6/addrconf.c head = &net->dev_index_head[h]; h 5890 net/ipv6/addrconf.c cb->args[0] = h; h 238 net/ipv6/anycast.c static void aca_free_rcu(struct rcu_head *h) h 240 net/ipv6/anycast.c struct ifacaddr6 *aca = container_of(h, struct ifacaddr6, rcu); h 960 net/ipv6/exthdrs.c struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt)); h 962 net/ipv6/exthdrs.c memcpy(h, opt, ipv6_optlen(opt)); h 963 net/ipv6/exthdrs.c h->nexthdr = *proto; h 219 net/ipv6/ip6_fib.c unsigned int h; h 226 net/ipv6/ip6_fib.c h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1); h 232 net/ipv6/ip6_fib.c hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]); h 275 net/ipv6/ip6_fib.c unsigned int h; h 279 net/ipv6/ip6_fib.c h = id & (FIB6_TABLE_HASHSZ - 1); h 281 net/ipv6/ip6_fib.c head = &net->ipv6.fib_table_hash[h]; h 345 net/ipv6/ip6_fib.c unsigned int h, fib_seq = 0; h 348 net/ipv6/ip6_fib.c for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { h 349 net/ipv6/ip6_fib.c struct hlist_head *head = &net->ipv6.fib_table_hash[h]; h 437 net/ipv6/ip6_fib.c unsigned int h; h 448 net/ipv6/ip6_fib.c for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { h 449 net/ipv6/ip6_fib.c struct hlist_head *head = &net->ipv6.fib_table_hash[h]; h 569 net/ipv6/ip6_fib.c unsigned int h, s_h; h 635 net/ipv6/ip6_fib.c for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { h 637 net/ipv6/ip6_fib.c head = &net->ipv6.fib_table_hash[h]; h 651 net/ipv6/ip6_fib.c cb->args[0] = h; h 2140 
net/ipv6/ip6_fib.c unsigned int h; h 2143 net/ipv6/ip6_fib.c for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { h 2144 net/ipv6/ip6_fib.c head = &net->ipv6.fib_table_hash[h]; h 2441 net/ipv6/ip6_fib.c unsigned int h; h 2445 net/ipv6/ip6_fib.c h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1; h 2448 net/ipv6/ip6_fib.c h = 0; h 2452 net/ipv6/ip6_fib.c while (!node && h < FIB6_TABLE_HASHSZ) { h 2454 net/ipv6/ip6_fib.c hlist_first_rcu(&net->ipv6.fib_table_hash[h++])); h 253 net/ipv6/ip6_gre.c unsigned int h = HASH_KEY(p->i_key); h 260 net/ipv6/ip6_gre.c h ^= HASH_ADDR(remote); h 263 net/ipv6/ip6_gre.c return &ign->tunnels[prio][h]; h 1538 net/ipv6/ip6_gre.c int h; h 1539 net/ipv6/ip6_gre.c for (h = 0; h < IP6_GRE_HASH_SIZE; h++) { h 1542 net/ipv6/ip6_gre.c t = rtnl_dereference(ign->tunnels[prio][h]); h 195 net/ipv6/ip6_tunnel.c unsigned int h = 0; h 200 net/ipv6/ip6_tunnel.c h = HASH(remote, local); h 202 net/ipv6/ip6_tunnel.c return &ip6n->tnls[prio][h]; h 2194 net/ipv6/ip6_tunnel.c int h; h 2201 net/ipv6/ip6_tunnel.c for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) { h 2202 net/ipv6/ip6_tunnel.c t = rtnl_dereference(ip6n->tnls_r_l[h]); h 141 net/ipv6/ip6_vti.c unsigned int h = 0; h 146 net/ipv6/ip6_vti.c h = HASH(remote, local); h 148 net/ipv6/ip6_vti.c return &ip6n->tnls[prio][h]; h 1109 net/ipv6/ip6_vti.c int h; h 1112 net/ipv6/ip6_vti.c for (h = 0; h < IP6_VTI_HASH_SIZE; h++) { h 1113 net/ipv6/ip6_vti.c t = rtnl_dereference(ip6n->tnls_r_l[h]); h 614 net/ipv6/netfilter/ip6_tables.c unsigned int h; h 635 net/ipv6/netfilter/ip6_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) { h 636 net/ipv6/netfilter/ip6_tables.c if (!(valid_hooks & (1 << h))) h 638 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)e - base == hook_entries[h]) h 639 net/ipv6/netfilter/ip6_tables.c newinfo->hook_entry[h] = hook_entries[h]; h 640 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)e - base == underflows[h]) { h 644 net/ipv6/netfilter/ip6_tables.c newinfo->underflow[h] = underflows[h]; h 1378 net/ipv6/netfilter/ip6_tables.c int h; h 1397 net/ipv6/netfilter/ip6_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) { h 1398 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)de - base < newinfo->hook_entry[h]) h 1399 net/ipv6/netfilter/ip6_tables.c newinfo->hook_entry[h] -= origsize - *size; h 1400 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)de - base < newinfo->underflow[h]) h 1401 net/ipv6/netfilter/ip6_tables.c newinfo->underflow[h] -= origsize - *size; h 1298 net/ipv6/raw.c .h.raw_hash = &raw_v6_hashinfo, h 4283 net/ipv6/route.c unsigned int h; h 4287 net/ipv6/route.c for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { h 4288 net/ipv6/route.c head = &net->ipv6.fib_table_hash[h]; h 133 net/ipv6/sit.c unsigned int h = 0; h 138 net/ipv6/sit.c h ^= HASH(remote); h 142 net/ipv6/sit.c h ^= HASH(local); h 144 net/ipv6/sit.c return &sitn->tunnels[prio][h]; h 1822 net/ipv6/sit.c int h; h 1823 net/ipv6/sit.c for (h = 0; h < IP6_SIT_HASH_SIZE; h++) { h 1826 net/ipv6/sit.c t = rtnl_dereference(sitn->tunnels[prio][h]); h 2044 net/ipv6/tcp_ipv6.c .h.hashinfo = &tcp_hashinfo, h 1678 net/ipv6/udp.c .h.udp_table = &udp_table, h 54 net/ipv6/udplite.c .h.udp_table = &udplite_table, h 59 net/ipv6/xfrm6_tunnel.c unsigned int h; h 61 net/ipv6/xfrm6_tunnel.c h = ipv6_addr_hash((const struct in6_addr *)addr); h 62 net/ipv6/xfrm6_tunnel.c h ^= h >> 16; h 63 net/ipv6/xfrm6_tunnel.c h ^= h >> 8; h 64 net/ipv6/xfrm6_tunnel.c h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; h 66 net/ipv6/xfrm6_tunnel.c return h; h 45 net/mac80211/agg-rx.c static void 
ieee80211_free_tid_rx(struct rcu_head *h) h 48 net/mac80211/agg-rx.c container_of(h, struct tid_ampdu_rx, rcu_head); h 1308 net/mpls/af_mpls.c int h, s_h; h 1328 net/mpls/af_mpls.c for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { h 1330 net/mpls/af_mpls.c head = &net->dev_index_head[h]; h 1355 net/mpls/af_mpls.c cb->args[0] = h; h 19 net/ncsi/ncsi-aen.c static int ncsi_validate_aen_pkt(struct ncsi_aen_pkt_hdr *h, h 25 net/ncsi/ncsi-aen.c if (h->common.revision != NCSI_PKT_REVISION) h 27 net/ncsi/ncsi-aen.c if (ntohs(h->common.length) != payload) h 34 net/ncsi/ncsi-aen.c pchecksum = (__be32 *)((void *)(h + 1) + payload - 4); h 38 net/ncsi/ncsi-aen.c checksum = ncsi_calculate_checksum((unsigned char *)h, h 39 net/ncsi/ncsi-aen.c sizeof(*h) + payload - 4); h 47 net/ncsi/ncsi-aen.c struct ncsi_aen_pkt_hdr *h) h 60 net/ncsi/ncsi-aen.c ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc); h 65 net/ncsi/ncsi-aen.c lsc = (struct ncsi_aen_lsc_pkt *)h; h 141 net/ncsi/ncsi-aen.c struct ncsi_aen_pkt_hdr *h) h 147 net/ncsi/ncsi-aen.c ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc); h 173 net/ncsi/ncsi-aen.c struct ncsi_aen_pkt_hdr *h) h 181 net/ncsi/ncsi-aen.c ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc); h 187 net/ncsi/ncsi-aen.c hncdsc = (struct ncsi_aen_hncdsc_pkt *)h; h 201 net/ncsi/ncsi-aen.c struct ncsi_aen_pkt_hdr *h); h 210 net/ncsi/ncsi-aen.c struct ncsi_aen_pkt_hdr *h; h 215 net/ncsi/ncsi-aen.c h = (struct ncsi_aen_pkt_hdr *)skb_network_header(skb); h 217 net/ncsi/ncsi-aen.c if (ncsi_aen_handlers[i].type == h->type) { h 225 net/ncsi/ncsi-aen.c h->type); h 229 net/ncsi/ncsi-aen.c ret = ncsi_validate_aen_pkt(h, nah->payload); h 233 net/ncsi/ncsi-aen.c h->type); h 237 net/ncsi/ncsi-aen.c ret = nah->handler(ndp, h); h 241 net/ncsi/ncsi-aen.c h->type, ret); h 36 net/ncsi/ncsi-cmd.c static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h, h 42 net/ncsi/ncsi-cmd.c h->mc_id = 0; h 43 net/ncsi/ncsi-cmd.c h->revision = NCSI_PKT_REVISION; h 44 net/ncsi/ncsi-cmd.c h->reserved = 0; h 45 net/ncsi/ncsi-cmd.c h->id = nca->id; h 46 net/ncsi/ncsi-cmd.c h->type = nca->type; h 47 net/ncsi/ncsi-cmd.c h->channel = NCSI_TO_CHANNEL(nca->package, h 49 net/ncsi/ncsi-cmd.c h->length = htons(nca->payload); h 50 net/ncsi/ncsi-cmd.c h->reserved1[0] = 0; h 51 net/ncsi/ncsi-cmd.c h->reserved1[1] = 0; h 54 net/ncsi/ncsi-cmd.c checksum = ncsi_calculate_checksum((unsigned char *)h, h 55 net/ncsi/ncsi-cmd.c sizeof(*h) + nca->payload); h 56 net/ncsi/ncsi-cmd.c pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) + h 25 net/ncsi/ncsi-rsp.c struct ncsi_rsp_pkt_hdr *h; h 33 net/ncsi/ncsi-rsp.c h = (struct ncsi_rsp_pkt_hdr *)skb_network_header(nr->rsp); h 35 net/ncsi/ncsi-rsp.c if (h->common.revision != NCSI_PKT_REVISION) { h 40 net/ncsi/ncsi-rsp.c if (ntohs(h->common.length) != payload) { h 47 net/ncsi/ncsi-rsp.c if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED || h 48 net/ncsi/ncsi-rsp.c ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) { h 51 net/ncsi/ncsi-rsp.c ntohs(h->code), ntohs(h->reason)); h 59 net/ncsi/ncsi-rsp.c pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4); h 63 net/ncsi/ncsi-rsp.c checksum = ncsi_calculate_checksum((unsigned char *)h, h 64 net/ncsi/ncsi-rsp.c sizeof(*h) + payload - 4); h 67 net/netfilter/core.c static void __nf_hook_entries_free(struct rcu_head *h) h 71 net/netfilter/core.c head = container_of(h, struct nf_hook_entries_rcu_head, head); h 611 net/netfilter/core.c int h; h 613 net/netfilter/core.c for (h = 0; h < max; h++) h 
614 net/netfilter/core.c RCU_INIT_POINTER(e[h], NULL); h 48 net/netfilter/ipset/ip_set_hash_gen.h #define AHASH_MAX(h) ((h)->ahash_max) h 65 net/netfilter/ipset/ip_set_hash_gen.h #define TUNE_AHASH_MAX(h, multi) \ h 66 net/netfilter/ipset/ip_set_hash_gen.h ((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi)) h 68 net/netfilter/ipset/ip_set_hash_gen.h #define AHASH_MAX(h) AHASH_MAX_SIZE h 69 net/netfilter/ipset/ip_set_hash_gen.h #define TUNE_AHASH_MAX(h, multi) h 92 net/netfilter/ipset/ip_set_hash_gen.h #define ahash_bucket_start(h, htable_bits) \ h 94 net/netfilter/ipset/ip_set_hash_gen.h : (h) * jhash_size(HTABLE_REGION_BITS)) h 95 net/netfilter/ipset/ip_set_hash_gen.h #define ahash_bucket_end(h, htable_bits) \ h 97 net/netfilter/ipset/ip_set_hash_gen.h : ((h) + 1) * jhash_size(HTABLE_REGION_BITS)) h 115 net/netfilter/ipset/ip_set_hash_gen.h #define hbucket(h, i) ((h)->bucket[i]) h 352 net/netfilter/ipset/ip_set_hash_gen.h mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n) h 358 net/netfilter/ipset/ip_set_hash_gen.h for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) { h 361 net/netfilter/ipset/ip_set_hash_gen.h } else if (h->nets[i].cidr[n] < cidr) { h 363 net/netfilter/ipset/ip_set_hash_gen.h } else if (h->nets[i].cidr[n] == cidr) { h 364 net/netfilter/ipset/ip_set_hash_gen.h h->nets[CIDR_POS(cidr)].nets[n]++; h 370 net/netfilter/ipset/ip_set_hash_gen.h h->nets[i].cidr[n] = h->nets[i - 1].cidr[n]; h 372 net/netfilter/ipset/ip_set_hash_gen.h h->nets[i].cidr[n] = cidr; h 373 net/netfilter/ipset/ip_set_hash_gen.h h->nets[CIDR_POS(cidr)].nets[n] = 1; h 379 net/netfilter/ipset/ip_set_hash_gen.h mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n) h 385 net/netfilter/ipset/ip_set_hash_gen.h if (h->nets[i].cidr[n] != cidr) h 387 net/netfilter/ipset/ip_set_hash_gen.h h->nets[CIDR_POS(cidr)].nets[n]--; h 388 net/netfilter/ipset/ip_set_hash_gen.h if (h->nets[CIDR_POS(cidr)].nets[n] > 0) h 390 net/netfilter/ipset/ip_set_hash_gen.h for (j = i; j < net_end && h->nets[j].cidr[n]; j++) h 391 net/netfilter/ipset/ip_set_hash_gen.h h->nets[j].cidr[n] = h->nets[j + 1].cidr[n]; h 392 net/netfilter/ipset/ip_set_hash_gen.h h->nets[j].cidr[n] = 0; h 402 net/netfilter/ipset/ip_set_hash_gen.h mtype_ahash_memsize(const struct htype *h, const struct htable *t) h 404 net/netfilter/ipset/ip_set_hash_gen.h return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits); h 425 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 430 net/netfilter/ipset/ip_set_hash_gen.h t = ipset_dereference_nfnl(h->table); h 449 net/netfilter/ipset/ip_set_hash_gen.h memset(h->nets, 0, sizeof(h->nets)); h 478 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 482 net/netfilter/ipset/ip_set_hash_gen.h cancel_delayed_work_sync(&h->gc.dwork); h 484 net/netfilter/ipset/ip_set_hash_gen.h mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true); h 485 net/netfilter/ipset/ip_set_hash_gen.h list_for_each_safe(l, lt, &h->ad) { h 489 net/netfilter/ipset/ip_set_hash_gen.h kfree(h); h 513 net/netfilter/ipset/ip_set_hash_gen.h mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r) h 543 net/netfilter/ipset/ip_set_hash_gen.h mtype_del_cidr(set, h, h 590 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h; h 597 net/netfilter/ipset/ip_set_hash_gen.h h = set->data; h 600 net/netfilter/ipset/ip_set_hash_gen.h t = ipset_dereference_set(h->table, set); h 612 net/netfilter/ipset/ip_set_hash_gen.h mtype_gc_do(set, h, t, r); h 644 
net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 665 net/netfilter/ipset/ip_set_hash_gen.h orig = ipset_dereference_bh_nfnl(h->table); h 690 net/netfilter/ipset/ip_set_hash_gen.h t->maxelem = h->maxelem / ahash_numof_locks(htable_bits); h 697 net/netfilter/ipset/ip_set_hash_gen.h orig = ipset_dereference_bh_nfnl(h->table); h 725 net/netfilter/ipset/ip_set_hash_gen.h key = HKEY(data, h->initval, htable_bits); h 744 net/netfilter/ipset/ip_set_hash_gen.h if (m->size >= AHASH_MAX(h)) { h 779 net/netfilter/ipset/ip_set_hash_gen.h rcu_assign_pointer(h->table, t); h 790 net/netfilter/ipset/ip_set_hash_gen.h list_for_each_safe(l, lt, &h->ad) { h 826 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 832 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); h 858 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 869 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); h 870 net/netfilter/ipset/ip_set_hash_gen.h key = HKEY(value, h->initval, t->htable_bits); h 879 net/netfilter/ipset/ip_set_hash_gen.h mtype_gc_do(set, h, t, r); h 882 net/netfilter/ipset/ip_set_hash_gen.h maxelem = h->maxelem; h 940 net/netfilter/ipset/ip_set_hash_gen.h mtype_del_cidr(set, h, h 953 net/netfilter/ipset/ip_set_hash_gen.h TUNE_AHASH_MAX(h, multi); h 954 net/netfilter/ipset/ip_set_hash_gen.h if (n->size >= AHASH_MAX(h)) { h 956 net/netfilter/ipset/ip_set_hash_gen.h mtype_data_next(&h->next, d); h 982 net/netfilter/ipset/ip_set_hash_gen.h mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i); h 1022 net/netfilter/ipset/ip_set_hash_gen.h list_add_tail(&x->list, &h->ad); h 1048 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 1062 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); h 1063 net/netfilter/ipset/ip_set_hash_gen.h key = HKEY(value, h->initval, t->htable_bits); h 1091 net/netfilter/ipset/ip_set_hash_gen.h mtype_del_cidr(set, h, h 1145 net/netfilter/ipset/ip_set_hash_gen.h list_add(&x->list, &h->ad); h 1174 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 1175 net/netfilter/ipset/ip_set_hash_gen.h struct htable *t = rcu_dereference_bh(h->table); h 1187 net/netfilter/ipset/ip_set_hash_gen.h for (; j < NLEN && h->nets[j].cidr[0] && !multi; j++) { h 1190 net/netfilter/ipset/ip_set_hash_gen.h mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]), false); h 1191 net/netfilter/ipset/ip_set_hash_gen.h for (k = 0; k < NLEN && h->nets[k].cidr[1] && !multi; h 1193 net/netfilter/ipset/ip_set_hash_gen.h mtype_data_netmask(d, NCIDR_GET(h->nets[k].cidr[1]), h 1196 net/netfilter/ipset/ip_set_hash_gen.h mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0])); h 1198 net/netfilter/ipset/ip_set_hash_gen.h key = HKEY(d, h->initval, t->htable_bits); h 1229 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 1238 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); h 1252 net/netfilter/ipset/ip_set_hash_gen.h key = HKEY(d, h->initval, t->htable_bits); h 1277 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 1286 net/netfilter/ipset/ip_set_hash_gen.h t = rcu_dereference_bh(h->table); h 1288 net/netfilter/ipset/ip_set_hash_gen.h memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size; h 1297 net/netfilter/ipset/ip_set_hash_gen.h nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem))) h 1300 net/netfilter/ipset/ip_set_hash_gen.h if (h->netmask != HOST_MASK && h 1301 net/netfilter/ipset/ip_set_hash_gen.h nla_put_u8(skb, 
IPSET_ATTR_NETMASK, h->netmask)) h 1305 net/netfilter/ipset/ip_set_hash_gen.h if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask)) h 1325 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h = set->data; h 1330 net/netfilter/ipset/ip_set_hash_gen.h t = ipset_dereference_bh_nfnl(h->table); h 1463 net/netfilter/ipset/ip_set_hash_gen.h struct htype *h; h 1518 net/netfilter/ipset/ip_set_hash_gen.h hsize = sizeof(*h); h 1519 net/netfilter/ipset/ip_set_hash_gen.h h = kzalloc(hsize, GFP_KERNEL); h 1520 net/netfilter/ipset/ip_set_hash_gen.h if (!h) h 1526 net/netfilter/ipset/ip_set_hash_gen.h kfree(h); h 1531 net/netfilter/ipset/ip_set_hash_gen.h kfree(h); h 1537 net/netfilter/ipset/ip_set_hash_gen.h kfree(h); h 1540 net/netfilter/ipset/ip_set_hash_gen.h h->gc.set = set; h 1543 net/netfilter/ipset/ip_set_hash_gen.h h->maxelem = maxelem; h 1545 net/netfilter/ipset/ip_set_hash_gen.h h->netmask = netmask; h 1548 net/netfilter/ipset/ip_set_hash_gen.h h->markmask = markmask; h 1550 net/netfilter/ipset/ip_set_hash_gen.h get_random_bytes(&h->initval, sizeof(h->initval)); h 1553 net/netfilter/ipset/ip_set_hash_gen.h t->maxelem = h->maxelem / ahash_numof_locks(hbits); h 1554 net/netfilter/ipset/ip_set_hash_gen.h RCU_INIT_POINTER(h->table, t); h 1556 net/netfilter/ipset/ip_set_hash_gen.h INIT_LIST_HEAD(&h->ad); h 1557 net/netfilter/ipset/ip_set_hash_gen.h set->data = h; h 1579 net/netfilter/ipset/ip_set_hash_gen.h IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc); h 1582 net/netfilter/ipset/ip_set_hash_gen.h IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc); h 1587 net/netfilter/ipset/ip_set_hash_gen.h t->htable_bits, h->maxelem, set->data, t); h 81 net/netfilter/ipset/ip_set_hash_ip.c const struct hash_ip4 *h = set->data; h 88 net/netfilter/ipset/ip_set_hash_ip.c ip &= ip_set_netmask(h->netmask); h 100 net/netfilter/ipset/ip_set_hash_ip.c const struct hash_ip4 *h = set->data; h 121 net/netfilter/ipset/ip_set_hash_ip.c ip &= ip_set_hostmask(h->netmask); h 144 net/netfilter/ipset/ip_set_hash_ip.c hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); h 147 net/netfilter/ipset/ip_set_hash_ip.c ip = ntohl(h->next.ip); h 218 net/netfilter/ipset/ip_set_hash_ip.c const struct hash_ip6 *h = set->data; h 224 net/netfilter/ipset/ip_set_hash_ip.c hash_ip6_netmask(&e.ip, h->netmask); h 235 net/netfilter/ipset/ip_set_hash_ip.c const struct hash_ip6 *h = set->data; h 263 net/netfilter/ipset/ip_set_hash_ip.c hash_ip6_netmask(&e.ip, h->netmask); h 83 net/netfilter/ipset/ip_set_hash_ipmark.c const struct hash_ipmark4 *h = set->data; h 89 net/netfilter/ipset/ip_set_hash_ipmark.c e.mark &= h->markmask; h 99 net/netfilter/ipset/ip_set_hash_ipmark.c const struct hash_ipmark4 *h = set->data; h 122 net/netfilter/ipset/ip_set_hash_ipmark.c e.mark &= h->markmask; h 146 net/netfilter/ipset/ip_set_hash_ipmark.c ip = ntohl(h->next.ip); h 209 net/netfilter/ipset/ip_set_hash_ipmark.c const struct hash_ipmark6 *h = set->data; h 215 net/netfilter/ipset/ip_set_hash_ipmark.c e.mark &= h->markmask; h 225 net/netfilter/ipset/ip_set_hash_ipmark.c const struct hash_ipmark6 *h = set->data; h 255 net/netfilter/ipset/ip_set_hash_ipmark.c e.mark &= h->markmask; h 107 net/netfilter/ipset/ip_set_hash_ipport.c const struct hash_ipport4 *h = set->data; h 176 net/netfilter/ipset/ip_set_hash_ipport.c ip = ntohl(h->next.ip); h 178 net/netfilter/ipset/ip_set_hash_ipport.c p = retried && ip == ntohl(h->next.ip) ? 
ntohs(h->next.port) h 265 net/netfilter/ipset/ip_set_hash_ipport.c const struct hash_ipport6 *h = set->data; h 323 net/netfilter/ipset/ip_set_hash_ipport.c port = ntohs(h->next.port); h 110 net/netfilter/ipset/ip_set_hash_ipportip.c const struct hash_ipportip4 *h = set->data; h 183 net/netfilter/ipset/ip_set_hash_ipportip.c ip = ntohl(h->next.ip); h 185 net/netfilter/ipset/ip_set_hash_ipportip.c p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) h 276 net/netfilter/ipset/ip_set_hash_ipportip.c const struct hash_ipportip6 *h = set->data; h 338 net/netfilter/ipset/ip_set_hash_ipportip.c port = ntohs(h->next.port); h 137 net/netfilter/ipset/ip_set_hash_ipportnet.c const struct hash_ipportnet4 *h = set->data; h 140 net/netfilter/ipset/ip_set_hash_ipportnet.c .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), h 162 net/netfilter/ipset/ip_set_hash_ipportnet.c const struct hash_ipportnet4 *h = set->data; h 269 net/netfilter/ipset/ip_set_hash_ipportnet.c ip = ntohl(h->next.ip); h 270 net/netfilter/ipset/ip_set_hash_ipportnet.c p = ntohs(h->next.port); h 271 net/netfilter/ipset/ip_set_hash_ipportnet.c ip2 = ntohl(h->next.ip2); h 388 net/netfilter/ipset/ip_set_hash_ipportnet.c const struct hash_ipportnet6 *h = set->data; h 391 net/netfilter/ipset/ip_set_hash_ipportnet.c .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), h 413 net/netfilter/ipset/ip_set_hash_ipportnet.c const struct hash_ipportnet6 *h = set->data; h 494 net/netfilter/ipset/ip_set_hash_ipportnet.c port = ntohs(h->next.port); h 116 net/netfilter/ipset/ip_set_hash_net.c const struct hash_net4 *h = set->data; h 119 net/netfilter/ipset/ip_set_hash_net.c .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), h 138 net/netfilter/ipset/ip_set_hash_net.c const struct hash_net4 *h = set->data; h 191 net/netfilter/ipset/ip_set_hash_net.c ip = ntohl(h->next.ip); h 284 net/netfilter/ipset/ip_set_hash_net.c const struct hash_net6 *h = set->data; h 287 net/netfilter/ipset/ip_set_hash_net.c .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), h 155 net/netfilter/ipset/ip_set_hash_netiface.c struct hash_netiface4 *h = set->data; h 158 net/netfilter/ipset/ip_set_hash_netiface.c .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), h 195 net/netfilter/ipset/ip_set_hash_netiface.c struct hash_netiface4 *h = set->data; h 253 net/netfilter/ipset/ip_set_hash_netiface.c ip = ntohl(h->next.ip); h 365 net/netfilter/ipset/ip_set_hash_netiface.c struct hash_netiface6 *h = set->data; h 368 net/netfilter/ipset/ip_set_hash_netiface.c .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), h 143 net/netfilter/ipset/ip_set_hash_netnet.c const struct hash_netnet4 *h = set->data; h 148 net/netfilter/ipset/ip_set_hash_netnet.c e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK); h 149 net/netfilter/ipset/ip_set_hash_netnet.c e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK); h 165 net/netfilter/ipset/ip_set_hash_netnet.c const struct hash_netnet4 *h = set->data; h 248 net/netfilter/ipset/ip_set_hash_netnet.c ip = ntohl(h->next.ip[0]); h 249 net/netfilter/ipset/ip_set_hash_netnet.c ip2 = ntohl(h->next.ip[1]); h 377 net/netfilter/ipset/ip_set_hash_netnet.c const struct hash_netnet6 *h = set->data; h 382 net/netfilter/ipset/ip_set_hash_netnet.c e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK); h 383 net/netfilter/ipset/ip_set_hash_netnet.c e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK); h 132 net/netfilter/ipset/ip_set_hash_netport.c const struct hash_netport4 *h = set->data; h 135 net/netfilter/ipset/ip_set_hash_netport.c .cidr = INIT_CIDR(h->nets[0].cidr[0], 
HOST_MASK), h 156 net/netfilter/ipset/ip_set_hash_netport.c const struct hash_netport4 *h = set->data; h 239 net/netfilter/ipset/ip_set_hash_netport.c ip = ntohl(h->next.ip); h 240 net/netfilter/ipset/ip_set_hash_netport.c p = ntohs(h->next.port); h 348 net/netfilter/ipset/ip_set_hash_netport.c const struct hash_netport6 *h = set->data; h 351 net/netfilter/ipset/ip_set_hash_netport.c .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), h 372 net/netfilter/ipset/ip_set_hash_netport.c const struct hash_netport6 *h = set->data; h 442 net/netfilter/ipset/ip_set_hash_netport.c port = ntohs(h->next.port); h 153 net/netfilter/ipset/ip_set_hash_netportnet.c const struct hash_netportnet4 *h = set->data; h 158 net/netfilter/ipset/ip_set_hash_netportnet.c e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK); h 159 net/netfilter/ipset/ip_set_hash_netportnet.c e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK); h 179 net/netfilter/ipset/ip_set_hash_netportnet.c const struct hash_netportnet4 *h = set->data; h 288 net/netfilter/ipset/ip_set_hash_netportnet.c ip = ntohl(h->next.ip[0]); h 289 net/netfilter/ipset/ip_set_hash_netportnet.c p = ntohs(h->next.port); h 290 net/netfilter/ipset/ip_set_hash_netportnet.c ip2 = ntohl(h->next.ip[1]); h 432 net/netfilter/ipset/ip_set_hash_netportnet.c const struct hash_netportnet6 *h = set->data; h 437 net/netfilter/ipset/ip_set_hash_netportnet.c e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK); h 438 net/netfilter/ipset/ip_set_hash_netportnet.c e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK); h 458 net/netfilter/ipset/ip_set_hash_netportnet.c const struct hash_netportnet6 *h = set->data; h 539 net/netfilter/ipset/ip_set_hash_netportnet.c port = ntohs(h->next.port); h 244 net/netfilter/ipvs/ip_vs_nfct.c struct nf_conntrack_tuple_hash *h; h 262 net/netfilter/ipvs/ip_vs_nfct.c h = nf_conntrack_find_get(cp->ipvs->net, &nf_ct_zone_dflt, &tuple); h 263 net/netfilter/ipvs/ip_vs_nfct.c if (h) { h 264 net/netfilter/ipvs/ip_vs_nfct.c ct = nf_ct_tuplehash_to_ctrack(h); h 28 net/netfilter/ipvs/ip_vs_ovf.c struct ip_vs_dest *dest, *h = NULL; h 41 net/netfilter/ipvs/ip_vs_ovf.c if (!h || w > hw) { h 42 net/netfilter/ipvs/ip_vs_ovf.c h = dest; h 47 net/netfilter/ipvs/ip_vs_ovf.c if (h) { h 49 net/netfilter/ipvs/ip_vs_ovf.c IP_VS_DBG_ADDR(h->af, &h->addr), h 50 net/netfilter/ipvs/ip_vs_ovf.c ntohs(h->port), h 51 net/netfilter/ipvs/ip_vs_ovf.c atomic_read(&h->activeconns), h 52 net/netfilter/ipvs/ip_vs_ovf.c atomic_read(&h->weight)); h 53 net/netfilter/ipvs/ip_vs_ovf.c return h; h 267 net/netfilter/nf_conncount.c static void __tree_nodes_free(struct rcu_head *h) h 271 net/netfilter/nf_conncount.c rbconn = container_of(h, struct nf_conncount_rb, rcu_head); h 684 net/netfilter/nf_conntrack_core.c nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, h 689 net/netfilter/nf_conntrack_core.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); h 694 net/netfilter/nf_conntrack_core.c return nf_ct_tuple_equal(tuple, &h->tuple) && h 695 net/netfilter/nf_conntrack_core.c nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) && h 733 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 742 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) { h 745 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); h 751 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, tuple, zone, net)) h 752 net/netfilter/nf_conntrack_core.c return h; h 772 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 777 
net/netfilter/nf_conntrack_core.c h = ____nf_conntrack_find(net, zone, tuple, hash); h 778 net/netfilter/nf_conntrack_core.c if (h) { h 782 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); h 784 net/netfilter/nf_conntrack_core.c if (likely(nf_ct_key_equal(h, tuple, zone, net))) h 791 net/netfilter/nf_conntrack_core.c h = NULL; h 796 net/netfilter/nf_conntrack_core.c return h; h 824 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 840 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) h 841 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, h 845 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) h 846 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, h 901 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h) h 904 net/netfilter/nf_conntrack_core.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); h 932 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 998 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) h 999 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, h 1003 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) h 1004 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, h 1039 net/netfilter/nf_conntrack_core.c ret = nf_ct_resolve_clash(net, skb, ctinfo, h); h 1056 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 1069 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) { h 1070 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); h 1080 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, tuple, zone, net)) { h 1121 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 1126 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) { h 1127 net/netfilter/nf_conntrack_core.c tmp = nf_ct_tuplehash_to_ctrack(h); h 1238 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 1251 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) { h 1254 net/netfilter/nf_conntrack_core.c tmp = nf_ct_tuplehash_to_ctrack(h); h 1531 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 1547 net/netfilter/nf_conntrack_core.c h = __nf_conntrack_find_get(state->net, zone, &tuple, hash); h 1548 net/netfilter/nf_conntrack_core.c if (!h) { h 1549 net/netfilter/nf_conntrack_core.c h = init_conntrack(state->net, tmpl, &tuple, h 1551 net/netfilter/nf_conntrack_core.c if (!h) h 1553 net/netfilter/nf_conntrack_core.c if (IS_ERR(h)) h 1554 net/netfilter/nf_conntrack_core.c return PTR_ERR(h); h 1556 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); h 1559 net/netfilter/nf_conntrack_core.c if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { h 1886 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 1920 net/netfilter/nf_conntrack_core.c h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple); h 1921 net/netfilter/nf_conntrack_core.c if (!h) h 1930 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); h 2060 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 2070 
net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) { h 2071 net/netfilter/nf_conntrack_core.c if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) h 2073 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); h 2138 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 2145 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) { h 2148 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); h 2334 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h; h 2362 net/netfilter/nf_conntrack_core.c h = hlist_nulls_entry(nf_conntrack_hash[i].first, h 2364 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); h 2365 net/netfilter/nf_conntrack_core.c hlist_nulls_del_rcu(&h->hnnode); h 2367 net/netfilter/nf_conntrack_core.c &h->tuple, hashsize); h 2368 net/netfilter/nf_conntrack_core.c hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); h 43 net/netfilter/nf_conntrack_ecache.c struct nf_conntrack_tuple_hash *h; h 50 net/netfilter/nf_conntrack_ecache.c hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) { h 51 net/netfilter/nf_conntrack_ecache.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); h 122 net/netfilter/nf_conntrack_expect.c unsigned int h; h 127 net/netfilter/nf_conntrack_expect.c h = nf_ct_expect_dst_hash(net, tuple); h 128 net/netfilter/nf_conntrack_expect.c hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) { h 162 net/netfilter/nf_conntrack_expect.c unsigned int h; h 167 net/netfilter/nf_conntrack_expect.c h = nf_ct_expect_dst_hash(net, tuple); h 168 net/netfilter/nf_conntrack_expect.c hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) { h 374 net/netfilter/nf_conntrack_expect.c unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple); h 391 net/netfilter/nf_conntrack_expect.c hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]); h 423 net/netfilter/nf_conntrack_expect.c unsigned int h; h 430 net/netfilter/nf_conntrack_expect.c h = nf_ct_expect_dst_hash(net, &expect->tuple); h 431 net/netfilter/nf_conntrack_expect.c hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { h 59 net/netfilter/nf_conntrack_helper.c unsigned int h; h 64 net/netfilter/nf_conntrack_helper.c h = helper_hash(tuple); h 65 net/netfilter/nf_conntrack_helper.c hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) { h 75 net/netfilter/nf_conntrack_helper.c struct nf_conntrack_helper *h; h 79 net/netfilter/nf_conntrack_helper.c hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { h 80 net/netfilter/nf_conntrack_helper.c if (strcmp(h->name, name)) h 83 net/netfilter/nf_conntrack_helper.c if (h->tuple.src.l3num != NFPROTO_UNSPEC && h 84 net/netfilter/nf_conntrack_helper.c h->tuple.src.l3num != l3num) h 87 net/netfilter/nf_conntrack_helper.c if (h->tuple.dst.protonum == protonum) h 88 net/netfilter/nf_conntrack_helper.c return h; h 98 net/netfilter/nf_conntrack_helper.c struct nf_conntrack_helper *h; h 102 net/netfilter/nf_conntrack_helper.c h = __nf_conntrack_helper_find(name, l3num, protonum); h 104 net/netfilter/nf_conntrack_helper.c if (h == NULL) { h 108 net/netfilter/nf_conntrack_helper.c h = __nf_conntrack_helper_find(name, l3num, protonum); h 110 net/netfilter/nf_conntrack_helper.c return h; h 114 net/netfilter/nf_conntrack_helper.c if (h != NULL && !try_module_get(h->me)) h 115 net/netfilter/nf_conntrack_helper.c h = NULL; h 116 net/netfilter/nf_conntrack_helper.c if (h != NULL && 
!refcount_inc_not_zero(&h->refcnt)) { h 117 net/netfilter/nf_conntrack_helper.c module_put(h->me); h 118 net/netfilter/nf_conntrack_helper.c h = NULL; h 123 net/netfilter/nf_conntrack_helper.c return h; h 152 net/netfilter/nf_conntrack_helper.c struct nf_conntrack_helper *h; h 158 net/netfilter/nf_conntrack_helper.c h = __nf_conntrack_helper_find(name, l3num, protonum); h 159 net/netfilter/nf_conntrack_helper.c if (!h) { h 164 net/netfilter/nf_conntrack_helper.c nat = nf_conntrack_nat_helper_find(h->nat_mod_name); h 166 net/netfilter/nf_conntrack_helper.c snprintf(mod_name, sizeof(mod_name), "%s", h->nat_mod_name); h 399 net/netfilter/nf_conntrack_helper.c unsigned int h = helper_hash(&me->tuple); h 425 net/netfilter/nf_conntrack_helper.c hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { h 434 net/netfilter/nf_conntrack_helper.c hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); h 908 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h; h 933 net/netfilter/nf_conntrack_netlink.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]], h 935 net/netfilter/nf_conntrack_netlink.c if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) h 937 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); h 1258 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h; h 1286 net/netfilter/nf_conntrack_netlink.c h = nf_conntrack_find_get(net, &zone, &tuple); h 1287 net/netfilter/nf_conntrack_netlink.c if (!h) h 1290 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); h 1318 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h; h 1354 net/netfilter/nf_conntrack_netlink.c h = nf_conntrack_find_get(net, &zone, &tuple); h 1355 net/netfilter/nf_conntrack_netlink.c if (!h) h 1358 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); h 1399 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h; h 1423 net/netfilter/nf_conntrack_netlink.c hlist_nulls_for_each_entry(h, n, list, hnnode) { h 1424 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); h 2104 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h = NULL; h 2130 net/netfilter/nf_conntrack_netlink.c h = nf_conntrack_find_get(net, &zone, &otuple); h 2132 net/netfilter/nf_conntrack_netlink.c h = nf_conntrack_find_get(net, &zone, &rtuple); h 2134 net/netfilter/nf_conntrack_netlink.c if (h == NULL) { h 2177 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); h 2995 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h; h 3012 net/netfilter/nf_conntrack_netlink.c h = nf_conntrack_find_get(net, &zone, &tuple); h 3013 net/netfilter/nf_conntrack_netlink.c if (!h) h 3016 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); h 3314 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h = NULL; h 3335 net/netfilter/nf_conntrack_netlink.c h = nf_conntrack_find_get(net, zone, &master_tuple); h 3336 net/netfilter/nf_conntrack_netlink.c if (!h) h 3338 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); h 152 net/netfilter/nf_conntrack_pptp.c const struct nf_conntrack_tuple_hash *h; h 161 net/netfilter/nf_conntrack_pptp.c h = nf_conntrack_find_get(net, zone, t); h 162 net/netfilter/nf_conntrack_pptp.c if (h) { h 163 net/netfilter/nf_conntrack_pptp.c sibling = nf_ct_tuplehash_to_ctrack(h); h 242 net/netfilter/nf_conntrack_proto.c const struct nf_conntrack_tuple_hash *h; h 269 net/netfilter/nf_conntrack_proto.c h = 
nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple); h 270 net/netfilter/nf_conntrack_proto.c if (h) { h 272 net/netfilter/nf_conntrack_proto.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); h 310 net/netfilter/nf_conntrack_proto.c const struct nf_conntrack_tuple_hash *h; h 333 net/netfilter/nf_conntrack_proto.c h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple); h 334 net/netfilter/nf_conntrack_proto.c if (!h) { h 341 net/netfilter/nf_conntrack_proto.c ct = nf_ct_tuplehash_to_ctrack(h); h 110 net/netfilter/nf_conntrack_proto_icmp.c const struct nf_conntrack_tuple_hash *h; h 131 net/netfilter/nf_conntrack_proto_icmp.c h = nf_conntrack_find_get(state->net, zone, &innertuple); h 132 net/netfilter/nf_conntrack_proto_icmp.c if (!h) h 166 net/netfilter/nf_conntrack_proto_icmp.c ct = nf_ct_tuplehash_to_ctrack(h); h 167 net/netfilter/nf_conntrack_proto_icmp.c dir = NF_CT_DIRECTION(h); h 302 net/netfilter/nf_nat_core.c unsigned int h = hash_by_src(net, tuple); h 305 net/netfilter/nf_nat_core.c hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) { h 816 net/netfilter/nf_nat_core.c unsigned int h; h 818 net/netfilter/nf_nat_core.c h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); h 819 net/netfilter/nf_nat_core.c spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); h 821 net/netfilter/nf_nat_core.c spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); h 1630 net/netfilter/nf_tables_api.c struct rcu_head h; h 6654 net/netfilter/nf_tables_api.c static void __nf_tables_commit_chain_free_rules_old(struct rcu_head *h) h 6656 net/netfilter/nf_tables_api.c struct nft_rules_old *o = container_of(h, struct nft_rules_old, h); h 6673 net/netfilter/nf_tables_api.c call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old); h 1034 net/netfilter/nfnetlink_log.c struct hlist_node *h) h 1036 net/netfilter/nfnetlink_log.c h = rcu_dereference_bh(hlist_next_rcu(h)); h 1037 net/netfilter/nfnetlink_log.c while (!h) { h 1046 net/netfilter/nfnetlink_log.c h = rcu_dereference_bh(hlist_first_rcu(head)); h 1048 net/netfilter/nfnetlink_log.c return h; h 119 net/netfilter/nfnetlink_queue.c unsigned int h; h 147 net/netfilter/nfnetlink_queue.c h = instance_hashfn(queue_num); h 148 net/netfilter/nfnetlink_queue.c hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]); h 1426 net/netfilter/nfnetlink_queue.c static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) h 1431 net/netfilter/nfnetlink_queue.c h = h->next; h 1432 net/netfilter/nfnetlink_queue.c while (!h) { h 1439 net/netfilter/nfnetlink_queue.c h = q->instance_table[st->bucket].first; h 1441 net/netfilter/nfnetlink_queue.c return h; h 32 net/netfilter/nft_hash.c u32 h; h 34 net/netfilter/nft_hash.c h = reciprocal_scale(jhash(data, priv->len, priv->seed), h 37 net/netfilter/nft_hash.c regs->data[priv->dreg] = h + priv->offset; h 52 net/netfilter/nft_hash.c u32 h; h 54 net/netfilter/nft_hash.c h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus); h 56 net/netfilter/nft_hash.c regs->data[priv->dreg] = h + priv->offset; h 40 net/netfilter/xt_RATEEST.c unsigned int h; h 42 net/netfilter/xt_RATEEST.c h = xt_rateest_hash(est->name); h 43 net/netfilter/xt_RATEEST.c hlist_add_head(&est->list, &xn->hash[h]); h 50 net/netfilter/xt_RATEEST.c unsigned int h; h 52 net/netfilter/xt_RATEEST.c h = xt_rateest_hash(name); h 53 net/netfilter/xt_RATEEST.c hlist_for_each_entry(est, &xn->hash[h], list) { h 129 net/netfilter/xt_recent.c unsigned int h; h 132 net/netfilter/xt_recent.c h = 
recent_entry_hash4(addrp); h 134 net/netfilter/xt_recent.c h = recent_entry_hash6(addrp); h 136 net/netfilter/xt_recent.c list_for_each_entry(e, &table->iphash[h], list) h 59 net/netlabel/netlabel_addrlist.h struct list_head *h) h 63 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) { h 71 net/netlabel/netlabel_addrlist.h struct list_head *h) h 75 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) { h 126 net/netlabel/netlabel_addrlist.h struct list_head *h) h 130 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) { h 138 net/netlabel/netlabel_addrlist.h struct list_head *h) h 142 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) { h 224 net/netlink/diag.c static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) h 229 net/netlink/diag.c if (nlmsg_len(h) < hdrlen) h 232 net/netlink/diag.c if (h->nlmsg_flags & NLM_F_DUMP) { h 237 net/netlink/diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); h 553 net/openvswitch/conntrack.c struct nf_conntrack_tuple_hash *h; h 568 net/openvswitch/conntrack.c h = nf_conntrack_find_get(net, zone, &tuple); h 569 net/openvswitch/conntrack.c if (h) { h 570 net/openvswitch/conntrack.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); h 582 net/openvswitch/conntrack.c ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h) h 584 net/openvswitch/conntrack.c const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); h 586 net/openvswitch/conntrack.c if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) h 610 net/openvswitch/conntrack.c struct nf_conntrack_tuple_hash *h; h 631 net/openvswitch/conntrack.c h = nf_conntrack_find_get(net, zone, &tuple); h 632 net/openvswitch/conntrack.c if (!h) h 635 net/openvswitch/conntrack.c ct = nf_ct_tuplehash_to_ctrack(h); h 642 net/openvswitch/conntrack.c h = &ct->tuplehash[!h->tuple.dst.dir]; h 644 net/openvswitch/conntrack.c nf_ct_set(skb, ct, ovs_ct_get_info(h)); h 363 net/packet/af_packet.c union tpacket_uhdr h; h 365 net/packet/af_packet.c h.raw = frame; h 368 net/packet/af_packet.c h.h1->tp_status = status; h 369 net/packet/af_packet.c flush_dcache_page(pgv_to_page(&h.h1->tp_status)); h 372 net/packet/af_packet.c h.h2->tp_status = status; h 373 net/packet/af_packet.c flush_dcache_page(pgv_to_page(&h.h2->tp_status)); h 376 net/packet/af_packet.c h.h3->tp_status = status; h 377 net/packet/af_packet.c flush_dcache_page(pgv_to_page(&h.h3->tp_status)); h 389 net/packet/af_packet.c union tpacket_uhdr h; h 393 net/packet/af_packet.c h.raw = frame; h 396 net/packet/af_packet.c flush_dcache_page(pgv_to_page(&h.h1->tp_status)); h 397 net/packet/af_packet.c return h.h1->tp_status; h 399 net/packet/af_packet.c flush_dcache_page(pgv_to_page(&h.h2->tp_status)); h 400 net/packet/af_packet.c return h.h2->tp_status; h 402 net/packet/af_packet.c flush_dcache_page(pgv_to_page(&h.h3->tp_status)); h 403 net/packet/af_packet.c return h.h3->tp_status; h 430 net/packet/af_packet.c union tpacket_uhdr h; h 437 net/packet/af_packet.c h.raw = frame; h 440 net/packet/af_packet.c h.h1->tp_sec = ts.tv_sec; h 441 net/packet/af_packet.c h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; h 444 net/packet/af_packet.c h.h2->tp_sec = ts.tv_sec; h 445 net/packet/af_packet.c h.h2->tp_nsec = ts.tv_nsec; h 448 net/packet/af_packet.c h.h3->tp_sec = ts.tv_sec; h 449 net/packet/af_packet.c h.h3->tp_nsec = ts.tv_nsec; h 457 net/packet/af_packet.c flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); h 469 net/packet/af_packet.c union tpacket_uhdr h; h 474 net/packet/af_packet.c h.raw = rb->pg_vec[pg_vec_pos].buffer + h 
477 net/packet/af_packet.c if (status != __packet_get_status(po, h.raw)) h 480 net/packet/af_packet.c return h.raw; h 2165 net/packet/af_packet.c union tpacket_uhdr h; h 2182 net/packet/af_packet.c BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); h 2183 net/packet/af_packet.c BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); h 2273 net/packet/af_packet.c h.raw = packet_current_rx_frame(po, skb, h 2275 net/packet/af_packet.c if (!h.raw) h 2286 net/packet/af_packet.c virtio_net_hdr_from_skb(skb, h.raw + macoff - h 2310 net/packet/af_packet.c skb_copy_bits(skb, 0, h.raw + macoff, snaplen); h 2319 net/packet/af_packet.c h.h1->tp_len = skb->len; h 2320 net/packet/af_packet.c h.h1->tp_snaplen = snaplen; h 2321 net/packet/af_packet.c h.h1->tp_mac = macoff; h 2322 net/packet/af_packet.c h.h1->tp_net = netoff; h 2323 net/packet/af_packet.c h.h1->tp_sec = ts.tv_sec; h 2324 net/packet/af_packet.c h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; h 2325 net/packet/af_packet.c hdrlen = sizeof(*h.h1); h 2328 net/packet/af_packet.c h.h2->tp_len = skb->len; h 2329 net/packet/af_packet.c h.h2->tp_snaplen = snaplen; h 2330 net/packet/af_packet.c h.h2->tp_mac = macoff; h 2331 net/packet/af_packet.c h.h2->tp_net = netoff; h 2332 net/packet/af_packet.c h.h2->tp_sec = ts.tv_sec; h 2333 net/packet/af_packet.c h.h2->tp_nsec = ts.tv_nsec; h 2335 net/packet/af_packet.c h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); h 2336 net/packet/af_packet.c h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); h 2339 net/packet/af_packet.c h.h2->tp_vlan_tci = 0; h 2340 net/packet/af_packet.c h.h2->tp_vlan_tpid = 0; h 2342 net/packet/af_packet.c memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); h 2343 net/packet/af_packet.c hdrlen = sizeof(*h.h2); h 2349 net/packet/af_packet.c h.h3->tp_status |= status; h 2350 net/packet/af_packet.c h.h3->tp_len = skb->len; h 2351 net/packet/af_packet.c h.h3->tp_snaplen = snaplen; h 2352 net/packet/af_packet.c h.h3->tp_mac = macoff; h 2353 net/packet/af_packet.c h.h3->tp_net = netoff; h 2354 net/packet/af_packet.c h.h3->tp_sec = ts.tv_sec; h 2355 net/packet/af_packet.c h.h3->tp_nsec = ts.tv_nsec; h 2356 net/packet/af_packet.c memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); h 2357 net/packet/af_packet.c hdrlen = sizeof(*h.h3); h 2363 net/packet/af_packet.c sll = h.raw + TPACKET_ALIGN(hdrlen); h 2380 net/packet/af_packet.c end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + h 2383 net/packet/af_packet.c for (start = h.raw; start < end; start += PAGE_SIZE) h 2391 net/packet/af_packet.c __packet_set_status(po, h.raw, status); h 224 net/packet/diag.c static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) h 230 net/packet/diag.c if (nlmsg_len(h) < hdrlen) h 233 net/packet/diag.c req = nlmsg_data(h); h 238 net/packet/diag.c if (h->nlmsg_flags & NLM_F_DUMP) { h 242 net/packet/diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); h 52 net/phonet/pep.c } *ph, h; h 55 net/phonet/pep.c ph = skb_header_pointer(skb, 0, 2, &h); h 107 net/phonet/socket.c unsigned int h; h 110 net/phonet/socket.c for (h = 0; h < PN_HASHSIZE; h++) { h 535 net/phonet/socket.c unsigned int h; h 537 net/phonet/socket.c for (h = 0; h < PN_HASHSIZE; h++) { h 135 net/sched/cls_fw.c int h; h 140 net/sched/cls_fw.c for (h = 0; h < HTSIZE; h++) { h 141 net/sched/cls_fw.c while ((f = rtnl_dereference(head->ht[h])) != NULL) { h 142 net/sched/cls_fw.c RCU_INIT_POINTER(head->ht[h], h 162 net/sched/cls_fw.c int h; h 182 net/sched/cls_fw.c for (h = 0; h < HTSIZE; h++) { h 183 net/sched/cls_fw.c if 
(rcu_access_pointer(head->ht[h])) { h 349 net/sched/cls_fw.c int h; h 357 net/sched/cls_fw.c for (h = 0; h < HTSIZE; h++) { h 360 net/sched/cls_fw.c for (f = rtnl_dereference(head->ht[h]); f; h 79 net/sched/cls_route.c int h = route4_fastmap_hash(id, iif); h 83 net/sched/cls_route.c head->fastmap[h].id = id; h 84 net/sched/cls_route.c head->fastmap[h].iif = iif; h 85 net/sched/cls_route.c head->fastmap[h].filter = f; h 131 net/sched/cls_route.c u32 id, h; h 142 net/sched/cls_route.c h = route4_fastmap_hash(id, iif); h 145 net/sched/cls_route.c if (id == head->fastmap[h].id && h 146 net/sched/cls_route.c iif == head->fastmap[h].iif && h 147 net/sched/cls_route.c (f = head->fastmap[h].filter) != NULL) { h 159 net/sched/cls_route.c h = route4_hash_to(id); h 162 net/sched/cls_route.c b = rcu_dereference_bh(head->table[h]); h 181 net/sched/cls_route.c if (h < 256) { h 182 net/sched/cls_route.c h = 256; h 195 net/sched/cls_route.c u32 h = id & 0xFF; h 198 net/sched/cls_route.c h += 256; h 199 net/sched/cls_route.c return h; h 319 net/sched/cls_route.c unsigned int h = 0; h 325 net/sched/cls_route.c h = f->handle; h 328 net/sched/cls_route.c fp = &b->ht[from_hash(h >> 16)]; h 356 net/sched/cls_route.c RCU_INIT_POINTER(head->table[to_hash(h)], NULL); h 476 net/sched/cls_route.c unsigned int h, th; h 517 net/sched/cls_route.c h = from_hash(f->handle >> 16); h 518 net/sched/cls_route.c fp = &f->bkt->ht[h]; h 531 net/sched/cls_route.c h = from_hash(fold->handle >> 16); h 534 net/sched/cls_route.c fp = &b->ht[h]; h 565 net/sched/cls_route.c unsigned int h, h1; h 570 net/sched/cls_route.c for (h = 0; h <= 256; h++) { h 571 net/sched/cls_route.c struct route4_bucket *b = rtnl_dereference(head->table[h]); h 101 net/sched/cls_rsvp.h unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1]; h 103 net/sched/cls_rsvp.h h ^= h>>16; h 104 net/sched/cls_rsvp.h h ^= h>>8; h 105 net/sched/cls_rsvp.h return (h ^ protocol ^ tunnelid) & 0xFF; h 110 net/sched/cls_rsvp.h unsigned int h = (__force __u32)src[RSVP_DST_LEN-1]; h 112 net/sched/cls_rsvp.h h ^= h>>16; h 113 net/sched/cls_rsvp.h h ^= h>>8; h 114 net/sched/cls_rsvp.h h ^= h>>4; h 115 net/sched/cls_rsvp.h return h & 0xF; h 220 net/sched/cls_rsvp.h static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h) h 226 net/sched/cls_rsvp.h unsigned int h1 = h & 0xFF; h 227 net/sched/cls_rsvp.h unsigned int h2 = (h >> 8) & 0xFF; h 233 net/sched/cls_rsvp.h if (pins->handle == h) { h 346 net/sched/cls_rsvp.h unsigned int h = f->handle; h 351 net/sched/cls_rsvp.h fp = &s->ht[(h >> 8) & 0xFF]; h 365 net/sched/cls_rsvp.h sp = &head->ht[h & 0xFF]; h 397 net/sched/cls_rsvp.h u32 h; h 401 net/sched/cls_rsvp.h h = data->hgenerator|salt; h 402 net/sched/cls_rsvp.h if (!rsvp_get(tp, h)) h 403 net/sched/cls_rsvp.h return h; h 660 net/sched/cls_rsvp.h unsigned int h, h1; h 665 net/sched/cls_rsvp.h for (h = 0; h < 256; h++) { h 668 net/sched/cls_rsvp.h for (s = rtnl_dereference(head->ht[h]); s; h 49 net/sched/cls_tcindex.c struct tcindex_filter __rcu **h; /* imperfect hash; */ h 74 net/sched/cls_tcindex.c kfree(p->h); h 86 net/sched/cls_tcindex.c } else if (p->h) { h 90 net/sched/cls_tcindex.c fp = &p->h[key % p->hash]; h 213 net/sched/cls_tcindex.c walk = p->h + i; h 392 net/sched/cls_tcindex.c cp->h = p->h; h 409 net/sched/cls_tcindex.c } else if (cp->h && cp->hash != cp->alloc_hash) { h 417 net/sched/cls_tcindex.c if (!cp->perfect && !cp->h) h 431 net/sched/cls_tcindex.c if (!cp->perfect && !cp->h) { h 446 net/sched/cls_tcindex.c cp->h = hash; h 495 
net/sched/cls_tcindex.c fp = cp->h + (handle % cp->hash); h 514 net/sched/cls_tcindex.c kfree(cp->h); h 571 net/sched/cls_tcindex.c if (!p->h) h 574 net/sched/cls_tcindex.c for (f = rtnl_dereference(p->h[i]); f; f = next) { h 616 net/sched/cls_tcindex.c for (i = 0; p->h && i < p->hash; i++) { h 620 net/sched/cls_tcindex.c for (f = rtnl_dereference(p->h[i]); f; f = next) { h 639 net/sched/cls_tcindex.c pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h); h 663 net/sched/cls_tcindex.c fp = &p->h[i]; h 98 net/sched/cls_u32.c unsigned int h = ntohl(key & sel->hmask) >> fshift; h 100 net/sched/cls_u32.c return h; h 471 net/sched/cls_u32.c static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, h 477 net/sched/cls_u32.c tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack); h 479 net/sched/cls_u32.c cls_u32.hnode.divisor = h->divisor; h 480 net/sched/cls_u32.c cls_u32.hnode.handle = h->handle; h 481 net/sched/cls_u32.c cls_u32.hnode.prio = h->prio; h 486 net/sched/cls_u32.c static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, h 497 net/sched/cls_u32.c cls_u32.hnode.divisor = h->divisor; h 498 net/sched/cls_u32.c cls_u32.hnode.handle = h->handle; h 499 net/sched/cls_u32.c cls_u32.hnode.prio = h->prio; h 503 net/sched/cls_u32.c u32_clear_hw_hnode(tp, h, NULL); h 573 net/sched/cls_u32.c unsigned int h; h 575 net/sched/cls_u32.c for (h = 0; h <= ht->divisor; h++) { h 576 net/sched/cls_u32.c while ((n = rtnl_dereference(ht->ht[h])) != NULL) { h 577 net/sched/cls_u32.c RCU_INIT_POINTER(ht->ht[h], h 1117 net/sched/cls_u32.c unsigned int h; h 1134 net/sched/cls_u32.c for (h = 0; h <= ht->divisor; h++) { h 1135 net/sched/cls_u32.c for (n = rtnl_dereference(ht->ht[h]); h 1217 net/sched/cls_u32.c unsigned int h; h 1237 net/sched/cls_u32.c for (h = 0; h <= ht->divisor; h++) { h 1238 net/sched/cls_u32.c for (n = rtnl_dereference(ht->ht[h]); h 645 net/sched/sch_api.c struct hlist_head *h; h 648 net/sched/sch_api.c h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL); h 650 net/sched/sch_api.c if (h != NULL) { h 652 net/sched/sch_api.c INIT_HLIST_HEAD(&h[i]); h 654 net/sched/sch_api.c return h; h 663 net/sched/sch_api.c unsigned int i, h; h 680 net/sched/sch_api.c h = qdisc_class_hash(cl->classid, nmask); h 681 net/sched/sch_api.c hlist_add_head(&cl->hnode, &nhash[h]); h 716 net/sched/sch_api.c unsigned int h; h 719 net/sched/sch_api.c h = qdisc_class_hash(cl->classid, clhash->hashmask); h 720 net/sched/sch_api.c hlist_add_head(&cl->hnode, &clhash->hash[h]); h 887 net/sched/sch_cbq.c unsigned int h; h 892 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) { h 893 net/sched/sch_cbq.c hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { h 915 net/sched/sch_cbq.c unsigned int h; h 932 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) { h 935 net/sched/sch_cbq.c hlist_for_each_entry(c, &q->clhash.hash[h], h 1031 net/sched/sch_cbq.c unsigned int h; h 1045 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) { h 1046 net/sched/sch_cbq.c hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { h 1452 net/sched/sch_cbq.c unsigned int h; h 1462 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) { h 1463 net/sched/sch_cbq.c hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { h 1468 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) { h 1469 net/sched/sch_cbq.c hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], h 1755 net/sched/sch_cbq.c unsigned int h; h 1760 net/sched/sch_cbq.c for 
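The sch_api entries above grow the class hash by allocating a larger bucket array and re-adding every class under the new mask. A minimal userspace sketch of that rehash step; the node layout and the mixing function are stand-ins (the kernel chains hlist heads and uses qdisc_class_hash()).

#include <stdint.h>
#include <stdio.h>

struct node {
        uint32_t classid;
        struct node *next;
};

static unsigned int class_hash(uint32_t id, unsigned int mask)
{
        id ^= id >> 8;          /* cheap mixing; stand-in for the real hash */
        return id & mask;       /* mask assumes a power-of-two table */
}

/* Move every entry from the old table into the new, recomputing each
 * bucket with the new mask -- the shape of qdisc_class_hash_grow(). */
static void rehash(struct node **old, unsigned int omask,
                   struct node **new, unsigned int nmask)
{
        for (unsigned int i = 0; i <= omask; i++) {
                struct node *n = old[i], *next;

                for (; n; n = next) {
                        unsigned int h = class_hash(n->classid, nmask);

                        next = n->next;
                        n->next = new[h];
                        new[h] = n;
                }
        }
}

int main(void)
{
        struct node a = { 0x10001, NULL }, b = { 0x20001, &a };
        struct node *old[4] = { [1] = &b };     /* both hash to bucket 1 */
        struct node *new[16] = { 0 };

        rehash(old, 3, new, 15);
        for (unsigned int i = 0; i < 16; i++)
                for (struct node *n = new[i]; n; n = n->next)
                        printf("bucket %u: classid 0x%x\n", i, n->classid);
        return 0;
}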
(h = 0; h < q->clhash.hashsize; h++) { h 1761 net/sched/sch_cbq.c hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { h 903 net/sched/sch_netem.c q->clg.a3 = ge->h; h 1113 net/sched/sch_netem.c .h = q->clg.a3, h 77 net/smc/af_smc.c struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; h 80 net/smc/af_smc.c head = &h->ht; h 82 net/smc/af_smc.c write_lock_bh(&h->lock); h 85 net/smc/af_smc.c write_unlock_bh(&h->lock); h 93 net/smc/af_smc.c struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; h 95 net/smc/af_smc.c write_lock_bh(&h->lock); h 98 net/smc/af_smc.c write_unlock_bh(&h->lock); h 109 net/smc/af_smc.c .h.smc_hash = &smc_v4_hashinfo, h 121 net/smc/af_smc.c .h.smc_hash = &smc_v6_hashinfo, h 202 net/smc/smc_diag.c read_lock(&prot->h.smc_hash->lock); h 203 net/smc/smc_diag.c head = &prot->h.smc_hash->ht; h 216 net/smc/smc_diag.c read_unlock(&prot->h.smc_hash->lock); h 230 net/smc/smc_diag.c static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) h 234 net/smc/smc_diag.c if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY && h 235 net/smc/smc_diag.c h->nlmsg_flags & NLM_F_DUMP) { h 241 net/smc/smc_diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); h 76 net/sunrpc/auth_gss/svcauth_gss.c struct cache_head h; h 104 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *rsii = container_of(ref, struct rsi, h.ref); h 117 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *item = container_of(a, struct rsi, h); h 118 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *tmp = container_of(b, struct rsi, h); h 139 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *new = container_of(cnew, struct rsi, h); h 140 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *item = container_of(citem, struct rsi, h); h 158 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *new = container_of(cnew, struct rsi, h); h 159 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *item = container_of(citem, struct rsi, h); h 179 net/sunrpc/auth_gss/svcauth_gss.c return &rsii->h; h 185 net/sunrpc/auth_gss/svcauth_gss.c struct cache_head *h, h 188 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *rsii = container_of(h, struct rsi, h); h 228 net/sunrpc/auth_gss/svcauth_gss.c rsii.h.flags = 0; h 265 net/sunrpc/auth_gss/svcauth_gss.c rsii.h.expiry_time = expiry; h 271 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rsip->h, cd); h 295 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash); h 297 net/sunrpc/auth_gss/svcauth_gss.c return container_of(ch, struct rsi, h); h 307 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_update(cd, &new->h, h 308 net/sunrpc/auth_gss/svcauth_gss.c &old->h, hash); h 310 net/sunrpc/auth_gss/svcauth_gss.c return container_of(ch, struct rsi, h); h 338 net/sunrpc/auth_gss/svcauth_gss.c struct cache_head h; h 367 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *rsci = container_of(ref, struct rsc, h.ref); h 384 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *new = container_of(a, struct rsc, h); h 385 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *tmp = container_of(b, struct rsc, h); h 393 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *new = container_of(cnew, struct rsc, h); h 394 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *tmp = container_of(ctmp, struct rsc, h); h 407 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *new = container_of(cnew, struct rsc, h); h 408 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *tmp = container_of(ctmp, struct rsc, h); h 423 net/sunrpc/auth_gss/svcauth_gss.c return &rsci->h; h 448 net/sunrpc/auth_gss/svcauth_gss.c rsci.h.flags = 0; h 464 net/sunrpc/auth_gss/svcauth_gss.c 
set_bit(CACHE_NEGATIVE, &rsci.h.flags); h 537 net/sunrpc/auth_gss/svcauth_gss.c rsci.h.expiry_time = expiry; h 543 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rscp->h, cd); h 566 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash); h 568 net/sunrpc/auth_gss/svcauth_gss.c return container_of(ch, struct rsc, h); h 578 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_update(cd, &new->h, h 579 net/sunrpc/auth_gss/svcauth_gss.c &old->h, hash); h 581 net/sunrpc/auth_gss/svcauth_gss.c return container_of(ch, struct rsc, h); h 600 net/sunrpc/auth_gss/svcauth_gss.c if (cache_check(cd, &found->h, NULL)) h 777 net/sunrpc/auth_gss/svcauth_gss.c struct auth_domain h; h 796 net/sunrpc/auth_gss/svcauth_gss.c struct gss_domain *gd = container_of(dom, struct gss_domain, h); h 813 net/sunrpc/auth_gss/svcauth_gss.c kref_init(&new->h.ref); h 814 net/sunrpc/auth_gss/svcauth_gss.c new->h.name = kstrdup(name, GFP_KERNEL); h 815 net/sunrpc/auth_gss/svcauth_gss.c if (!new->h.name) h 817 net/sunrpc/auth_gss/svcauth_gss.c new->h.flavour = &svcauthops_gss; h 821 net/sunrpc/auth_gss/svcauth_gss.c test = auth_domain_lookup(name, &new->h); h 822 net/sunrpc/auth_gss/svcauth_gss.c if (test != &new->h) { /* Duplicate registration */ h 824 net/sunrpc/auth_gss/svcauth_gss.c kfree(new->h.name); h 1021 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rsci->h, cd); h 1189 net/sunrpc/auth_gss/svcauth_gss.c if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0) h 1205 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rsip->h, sn->rsi_cache); h 1270 net/sunrpc/auth_gss/svcauth_gss.c rsci.h.expiry_time = expiry; h 1276 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rscp->h, cd); h 1567 net/sunrpc/auth_gss/svcauth_gss.c sunrpc_cache_unhash(sn->rsc_cache, &rsci->h); h 1605 net/sunrpc/auth_gss/svcauth_gss.c cache_get(&rsci->h); h 1628 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rsci->h, sn->rsc_cache); h 1823 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&gsd->rsci->h, sn->rsc_cache); h 1833 net/sunrpc/auth_gss/svcauth_gss.c struct gss_domain *gd = container_of(dom, struct gss_domain, h); h 43 net/sunrpc/cache.c static void cache_init(struct cache_head *h, struct cache_detail *detail) h 46 net/sunrpc/cache.c INIT_HLIST_NODE(&h->cache_list); h 47 net/sunrpc/cache.c h->flags = 0; h 48 net/sunrpc/cache.c kref_init(&h->ref); h 49 net/sunrpc/cache.c h->expiry_time = now + CACHE_NEW_EXPIRY; h 53 net/sunrpc/cache.c h->last_refresh = now; h 214 net/sunrpc/cache.c static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) h 217 net/sunrpc/cache.c return cd->cache_upcall(cd, h); h 218 net/sunrpc/cache.c return sunrpc_cache_pipe_upcall(cd, h); h 221 net/sunrpc/cache.c static inline int cache_is_valid(struct cache_head *h) h 223 net/sunrpc/cache.c if (!test_bit(CACHE_VALID, &h->flags)) h 227 net/sunrpc/cache.c if (test_bit(CACHE_NEGATIVE, &h->flags)) h 242 net/sunrpc/cache.c static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h) h 247 net/sunrpc/cache.c rv = cache_is_valid(h); h 249 net/sunrpc/cache.c set_bit(CACHE_NEGATIVE, &h->flags); h 250 net/sunrpc/cache.c cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY, h 255 net/sunrpc/cache.c cache_fresh_unlocked(h, detail); h 274 net/sunrpc/cache.c struct cache_head *h, struct cache_req *rqstp) h 280 net/sunrpc/cache.c rv = cache_is_valid(h); h 283 net/sunrpc/cache.c refresh_age = (h->expiry_time - h->last_refresh); h 284 net/sunrpc/cache.c age = seconds_since_boot() - h->last_refresh; h 290 net/sunrpc/cache.c 
(h->expiry_time != 0 && age > refresh_age/2)) { h 293 net/sunrpc/cache.c if (!test_and_set_bit(CACHE_PENDING, &h->flags)) { h 294 net/sunrpc/cache.c switch (cache_make_upcall(detail, h)) { h 296 net/sunrpc/cache.c rv = try_to_negate_entry(detail, h); h 299 net/sunrpc/cache.c cache_fresh_unlocked(h, detail); h 303 net/sunrpc/cache.c rv = try_to_negate_entry(detail, h); h 307 net/sunrpc/cache.c if (!cache_defer_req(rqstp, h)) { h 312 net/sunrpc/cache.c rv = cache_is_valid(h); h 318 net/sunrpc/cache.c cache_put(h, detail); h 1193 net/sunrpc/cache.c int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h) h 1207 net/sunrpc/cache.c if (test_bit(CACHE_CLEANED, &h->flags)) h 1226 net/sunrpc/cache.c if (test_bit(CACHE_PENDING, &h->flags)) { h 1227 net/sunrpc/cache.c crq->item = cache_get(h); h 1266 net/sunrpc/cache.c int h, l; h 1268 net/sunrpc/cache.c h = hex_to_bin(bp[0]); h 1269 net/sunrpc/cache.c if (h < 0) h 1276 net/sunrpc/cache.c *dest++ = (h << 4) | l; h 1885 net/sunrpc/cache.c void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h) h 1888 net/sunrpc/cache.c if (!hlist_unhashed(&h->cache_list)){ h 1889 net/sunrpc/cache.c hlist_del_init_rcu(&h->cache_list); h 1891 net/sunrpc/cache.c set_bit(CACHE_CLEANED, &h->flags); h 1893 net/sunrpc/cache.c cache_fresh_unlocked(h, cd); h 1894 net/sunrpc/cache.c cache_put(h, cd); h 34 net/sunrpc/svcauth_unix.c struct auth_domain h; h 44 net/sunrpc/svcauth_unix.c struct unix_domain *ud = container_of(dom, struct unix_domain, h); h 63 net/sunrpc/svcauth_unix.c if (new && rv != &new->h) h 64 net/sunrpc/svcauth_unix.c svcauth_unix_domain_release(&new->h); h 76 net/sunrpc/svcauth_unix.c kref_init(&new->h.ref); h 77 net/sunrpc/svcauth_unix.c new->h.name = kstrdup(name, GFP_KERNEL); h 78 net/sunrpc/svcauth_unix.c if (new->h.name == NULL) { h 82 net/sunrpc/svcauth_unix.c new->h.flavour = &svcauth_unix; h 83 net/sunrpc/svcauth_unix.c rv = auth_domain_lookup(name, &new->h); h 97 net/sunrpc/svcauth_unix.c struct cache_head h; h 107 net/sunrpc/svcauth_unix.c struct ip_map *im = container_of(item, struct ip_map,h); h 111 net/sunrpc/svcauth_unix.c auth_domain_put(&im->m_client->h); h 121 net/sunrpc/svcauth_unix.c struct ip_map *orig = container_of(corig, struct ip_map, h); h 122 net/sunrpc/svcauth_unix.c struct ip_map *new = container_of(cnew, struct ip_map, h); h 128 net/sunrpc/svcauth_unix.c struct ip_map *new = container_of(cnew, struct ip_map, h); h 129 net/sunrpc/svcauth_unix.c struct ip_map *item = container_of(citem, struct ip_map, h); h 136 net/sunrpc/svcauth_unix.c struct ip_map *new = container_of(cnew, struct ip_map, h); h 137 net/sunrpc/svcauth_unix.c struct ip_map *item = container_of(citem, struct ip_map, h); h 139 net/sunrpc/svcauth_unix.c kref_get(&item->m_client->h.ref); h 146 net/sunrpc/svcauth_unix.c return &i->h; h 152 net/sunrpc/svcauth_unix.c struct cache_head *h, h 156 net/sunrpc/svcauth_unix.c struct ip_map *im = container_of(h, struct ip_map, h); h 241 net/sunrpc/svcauth_unix.c container_of(dom, struct unix_domain, h), h 255 net/sunrpc/svcauth_unix.c struct cache_head *h) h 261 net/sunrpc/svcauth_unix.c if (h == NULL) { h 265 net/sunrpc/svcauth_unix.c im = container_of(h, struct ip_map, h); h 269 net/sunrpc/svcauth_unix.c if (test_bit(CACHE_VALID, &h->flags) && h 270 net/sunrpc/svcauth_unix.c !test_bit(CACHE_NEGATIVE, &h->flags)) h 271 net/sunrpc/svcauth_unix.c dom = im->m_client->h.name; h 291 net/sunrpc/svcauth_unix.c ch = sunrpc_cache_lookup_rcu(cd, &ip.h, h 296 net/sunrpc/svcauth_unix.c return 
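The net/sunrpc/cache.c entries above decode a hex string two digits at a time, combining the high and low nibbles into one byte. A standalone sketch of that loop; hex_to_bin() is reimplemented here so the example builds on its own.

#include <ctype.h>
#include <stdio.h>

static int hex_to_bin(char c)   /* reimplemented for a standalone build */
{
        if (c >= '0' && c <= '9')
                return c - '0';
        c = tolower((unsigned char)c);
        if (c >= 'a' && c <= 'f')
                return c - 'a' + 10;
        return -1;
}

static int decode_hex(const char *bp, unsigned char *dest, int n)
{
        while (n--) {
                int h = hex_to_bin(bp[0]);
                int l = hex_to_bin(bp[1]);

                if (h < 0 || l < 0)
                        return -1;
                *dest++ = (h << 4) | l;   /* high nibble first */
                bp += 2;
        }
        return 0;
}

int main(void)
{
        unsigned char out[2];

        if (decode_hex("beef", out, 2) == 0)
                printf("%02x %02x\n", out[0], out[1]);   /* be ef */
        return 0;
}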
container_of(ch, struct ip_map, h); h 317 net/sunrpc/svcauth_unix.c ip.h.flags = 0; h 319 net/sunrpc/svcauth_unix.c set_bit(CACHE_NEGATIVE, &ip.h.flags); h 320 net/sunrpc/svcauth_unix.c ip.h.expiry_time = expiry; h 321 net/sunrpc/svcauth_unix.c ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, h 359 net/sunrpc/svcauth_unix.c if (cache_is_expired(sn->ip_map_cache, &ipm->h)) { h 367 net/sunrpc/svcauth_unix.c cache_put(&ipm->h, sn->ip_map_cache); h 370 net/sunrpc/svcauth_unix.c cache_get(&ipm->h); h 393 net/sunrpc/svcauth_unix.c cache_put(&ipm->h, sn->ip_map_cache); h 407 net/sunrpc/svcauth_unix.c cache_put(&ipm->h, sn->ip_map_cache); h 420 net/sunrpc/svcauth_unix.c struct cache_head h; h 434 net/sunrpc/svcauth_unix.c struct unix_gid *ug = container_of(item, struct unix_gid, h); h 443 net/sunrpc/svcauth_unix.c struct unix_gid *orig = container_of(corig, struct unix_gid, h); h 444 net/sunrpc/svcauth_unix.c struct unix_gid *new = container_of(cnew, struct unix_gid, h); h 449 net/sunrpc/svcauth_unix.c struct unix_gid *new = container_of(cnew, struct unix_gid, h); h 450 net/sunrpc/svcauth_unix.c struct unix_gid *item = container_of(citem, struct unix_gid, h); h 455 net/sunrpc/svcauth_unix.c struct unix_gid *new = container_of(cnew, struct unix_gid, h); h 456 net/sunrpc/svcauth_unix.c struct unix_gid *item = container_of(citem, struct unix_gid, h); h 465 net/sunrpc/svcauth_unix.c return &g->h; h 471 net/sunrpc/svcauth_unix.c struct cache_head *h, h 475 net/sunrpc/svcauth_unix.c struct unix_gid *ug = container_of(h, struct unix_gid, h); h 536 net/sunrpc/svcauth_unix.c ug.h.flags = 0; h 537 net/sunrpc/svcauth_unix.c ug.h.expiry_time = expiry; h 539 net/sunrpc/svcauth_unix.c &ug.h, &ugp->h, h 557 net/sunrpc/svcauth_unix.c struct cache_head *h) h 564 net/sunrpc/svcauth_unix.c if (h == NULL) { h 568 net/sunrpc/svcauth_unix.c ug = container_of(h, struct unix_gid, h); h 569 net/sunrpc/svcauth_unix.c if (test_bit(CACHE_VALID, &h->flags) && h 570 net/sunrpc/svcauth_unix.c !test_bit(CACHE_NEGATIVE, &h->flags)) h 631 net/sunrpc/svcauth_unix.c ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid)); h 633 net/sunrpc/svcauth_unix.c return container_of(ch, struct unix_gid, h); h 649 net/sunrpc/svcauth_unix.c ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle); h 657 net/sunrpc/svcauth_unix.c cache_put(&ug->h, sn->unix_gid_cache); h 701 net/sunrpc/svcauth_unix.c switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) { h 711 net/sunrpc/svcauth_unix.c rqstp->rq_client = &ipm->m_client->h; h 77 net/tipc/diag.c struct nlmsghdr *h) h 82 net/tipc/diag.c if (nlmsg_len(h) < hdrlen) h 85 net/tipc/diag.c if (h->nlmsg_flags & NLM_F_DUMP) { h 91 net/tipc/diag.c netlink_dump_start(net->diag_nlsk, skb, h, &c); h 309 net/unix/diag.c static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) h 314 net/unix/diag.c if (nlmsg_len(h) < hdrlen) h 317 net/unix/diag.c if (h->nlmsg_flags & NLM_F_DUMP) { h 321 net/unix/diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); h 323 net/unix/diag.c return unix_diag_get_exact(skb, h, nlmsg_data(h)); h 141 net/vmw_vsock/diag.c static int vsock_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) h 146 net/vmw_vsock/diag.c if (nlmsg_len(h) < hdrlen) h 149 net/vmw_vsock/diag.c if (h->nlmsg_flags & NLM_F_DUMP) { h 153 net/vmw_vsock/diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); h 440 net/vmw_vsock/hyperv_transport.c struct hvsock *h = vsk->trans; h 444 net/vmw_vsock/hyperv_transport.c h->vm_srv_id = vm.srv_id; h 448 
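The sunrpc auth entries above embed a generic struct cache_head inside type-specific records (rsi, rsc, ip_map, unix_gid) and recover the outer record with container_of(). A standalone sketch of that embedding pattern; the structs are simplified stand-ins that keep the field name from the listing.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cache_head { int flags; };

struct ip_map {
        char class[8];
        struct cache_head h;    /* generic part embedded in the record */
};

static void mark_valid(struct cache_head *ch)
{
        /* generic code sees only the cache_head... */
        ch->flags = 1;
        /* ...type-specific code recovers its container */
        struct ip_map *im = container_of(ch, struct ip_map, h);

        printf("class=%s flags=%d\n", im->class, im->h.flags);
}

int main(void)
{
        struct ip_map im = { "nfsd", { 0 } };

        mark_valid(&im.h);
        return 0;
}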
net/vmw_vsock/hyperv_transport.c h->host_srv_id = host.srv_id; h 450 net/vmw_vsock/hyperv_transport.c return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id); h 9 net/wireless/wext-compat.h #define EXPORT_WEXT_HANDLER(h) EXPORT_SYMBOL_GPL(h) h 11 net/wireless/wext-compat.h #define EXPORT_WEXT_HANDLER(h) h 89 net/xfrm/xfrm_hash.h unsigned int h = family ^ reqid; h 92 net/xfrm/xfrm_hash.h h ^= __xfrm4_daddr_saddr_hash(daddr, saddr); h 95 net/xfrm/xfrm_hash.h h ^= __xfrm6_daddr_saddr_hash(daddr, saddr); h 98 net/xfrm/xfrm_hash.h return (h ^ (h >> 16)) & hmask; h 106 net/xfrm/xfrm_hash.h unsigned int h = family; h 109 net/xfrm/xfrm_hash.h h ^= __xfrm4_daddr_saddr_hash(daddr, saddr); h 112 net/xfrm/xfrm_hash.h h ^= __xfrm6_daddr_saddr_hash(daddr, saddr); h 115 net/xfrm/xfrm_hash.h return (h ^ (h >> 16)) & hmask; h 122 net/xfrm/xfrm_hash.h unsigned int h = (__force u32)spi ^ proto; h 125 net/xfrm/xfrm_hash.h h ^= __xfrm4_addr_hash(daddr); h 128 net/xfrm/xfrm_hash.h h ^= __xfrm6_addr_hash(daddr); h 131 net/xfrm/xfrm_hash.h return (h ^ (h >> 10) ^ (h >> 20)) & hmask; h 145 net/xfrm/xfrm_hash.h unsigned int h = 0; h 153 net/xfrm/xfrm_hash.h h = __xfrm4_dpref_spref_hash(daddr, saddr, dbits, sbits); h 161 net/xfrm/xfrm_hash.h h = __xfrm6_dpref_spref_hash(daddr, saddr, dbits, sbits); h 164 net/xfrm/xfrm_hash.h h ^= (h >> 16); h 165 net/xfrm/xfrm_hash.h return h & hmask; h 174 net/xfrm/xfrm_hash.h unsigned int h = 0; h 178 net/xfrm/xfrm_hash.h h = __xfrm4_dpref_spref_hash(daddr, saddr, dbits, sbits); h 182 net/xfrm/xfrm_hash.h h = __xfrm6_dpref_spref_hash(daddr, saddr, dbits, sbits); h 185 net/xfrm/xfrm_hash.h h ^= (h >> 16); h 186 net/xfrm/xfrm_hash.h return h & hmask; h 529 net/xfrm/xfrm_policy.c unsigned int h; h 532 net/xfrm/xfrm_policy.c h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, h 536 net/xfrm/xfrm_policy.c hlist_add_head_rcu(&pol->bydst, ndsttable + h); h 537 net/xfrm/xfrm_policy.c h0 = h; h 539 net/xfrm/xfrm_policy.c if (h != h0) h 560 net/xfrm/xfrm_policy.c unsigned int h; h 562 net/xfrm/xfrm_policy.c h = __idx_hash(pol->index, nhashmask); h 563 net/xfrm/xfrm_policy.c hlist_add_head(&pol->byidx, nidxtable+h); h 92 net/xfrm/xfrm_state.c unsigned int h; h 94 net/xfrm/xfrm_state.c h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr, h 97 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->bydst, ndsttable + h); h 99 net/xfrm/xfrm_state.c h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr, h 102 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->bysrc, nsrctable + h); h 105 net/xfrm/xfrm_state.c h = __xfrm_spi_hash(&x->id.daddr, x->id.spi, h 108 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->byspi, nspitable + h); h 932 net/xfrm/xfrm_state.c unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family); h 935 net/xfrm/xfrm_state.c hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) { h 957 net/xfrm/xfrm_state.c unsigned int h = xfrm_src_hash(net, daddr, saddr, family); h 960 net/xfrm/xfrm_state.c hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) { h 1046 net/xfrm/xfrm_state.c unsigned int h, h_wildcard; h 1061 net/xfrm/xfrm_state.c h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); h 1062 net/xfrm/xfrm_state.c hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) { h 1137 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h); h 1138 net/xfrm/xfrm_state.c h = xfrm_src_hash(net, daddr, saddr, encap_family); h 1139 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h); h 1141 
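The xfrm_hash.h entries above pick a state or policy bucket by XOR-ing flow values together, folding the high half into the low bits, and masking with hmask (table size minus one, so the table must be a power of two). A sketch with the same fold-and-mask shape; the per-family address mix is collapsed to a plain XOR here.

#include <stdint.h>
#include <stdio.h>

static unsigned int dst_hash(uint32_t daddr, uint32_t saddr,
                             uint32_t reqid, uint16_t family,
                             unsigned int hmask)
{
        unsigned int h = family ^ reqid;

        h ^= daddr ^ saddr;             /* stand-in for the family mix */
        return (h ^ (h >> 16)) & hmask; /* fold, then mask to the table */
}

int main(void)
{
        printf("bucket = %u\n",
               dst_hash(0x0a000001u, 0x0a000002u, 7, 2 /* AF_INET */, 1023));
        return 0;
}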
net/xfrm/xfrm_state.c h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family); h 1142 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); h 1187 net/xfrm/xfrm_state.c unsigned int h; h 1191 net/xfrm/xfrm_state.c h = xfrm_dst_hash(net, daddr, saddr, reqid, family); h 1192 net/xfrm/xfrm_state.c hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { h 1241 net/xfrm/xfrm_state.c unsigned int h; h 1245 net/xfrm/xfrm_state.c h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, h 1247 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h); h 1249 net/xfrm/xfrm_state.c h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family); h 1250 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h); h 1253 net/xfrm/xfrm_state.c h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, h 1256 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); h 1275 net/xfrm/xfrm_state.c unsigned int h; h 1279 net/xfrm/xfrm_state.c h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); h 1280 net/xfrm/xfrm_state.c hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { h 1311 net/xfrm/xfrm_state.c unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); h 1315 net/xfrm/xfrm_state.c hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { h 1370 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h); h 1371 net/xfrm/xfrm_state.c h = xfrm_src_hash(net, daddr, saddr, family); h 1372 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h); h 1537 net/xfrm/xfrm_state.c unsigned int h; h 1543 net/xfrm/xfrm_state.c h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr, h 1545 net/xfrm/xfrm_state.c hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { h 1560 net/xfrm/xfrm_state.c h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr, h 1562 net/xfrm/xfrm_state.c hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) { h 1967 net/xfrm/xfrm_state.c unsigned int h; h 1993 net/xfrm/xfrm_state.c for (h = 0; h < high-low+1; h++) { h 2005 net/xfrm/xfrm_state.c h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); h 2006 net/xfrm/xfrm_state.c hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); h 156 scripts/genksyms/genksyms.c unsigned long h = crc32(name) % HASH_BUCKETS; h 159 scripts/genksyms/genksyms.c for (sym = symtab[h]; sym; sym = sym->hash_next) h 189 scripts/genksyms/genksyms.c unsigned long h; h 229 scripts/genksyms/genksyms.c h = crc32(name) % HASH_BUCKETS; h 230 scripts/genksyms/genksyms.c for (sym = symtab[h]; sym; sym = sym->hash_next) { h 268 scripts/genksyms/genksyms.c for (psym = &symtab[h]; *psym; psym = &(*psym)->hash_next) { h 285 scripts/genksyms/genksyms.c sym->hash_next = symtab[h]; h 286 scripts/genksyms/genksyms.c symtab[h] = sym; h 470 scripts/kconfig/gconf.c gint w, h; h 473 scripts/kconfig/gconf.c gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h); h 475 scripts/kconfig/gconf.c gdk_window_get_size(widget->window, &w, &h); h 477 scripts/kconfig/gconf.c if (h == old_h) h 479 scripts/kconfig/gconf.c old_h = h; h 481 scripts/kconfig/gconf.c gtk_paned_set_position(GTK_PANED(vpaned), 2 * h / 3); h 738 scripts/kconfig/gconf.c gint w, h; h 741 scripts/kconfig/gconf.c gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h); h 51 scripts/kconfig/lxdialog/util.c #define DLG_COLOR(dialog, f, b, h) \ h 55 scripts/kconfig/lxdialog/util.c dlg.dialog.hl = (h); \ h 147 
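The genksyms entries above keep symbols in a chained hash table bucketed by crc32(name) % HASH_BUCKETS. A standalone sketch of that insert/lookup pair; crc32() here is a trivial stand-in hash, not the real polynomial.

#include <stdio.h>
#include <string.h>

#define HASH_BUCKETS 257   /* '%' works with any size, not just powers of two */

struct symbol {
        const char *name;
        struct symbol *hash_next;
};

static struct symbol *symtab[HASH_BUCKETS];

static unsigned long crc32(const char *s)   /* stand-in hash */
{
        unsigned long h = 0;

        while (*s)
                h = h * 31 + *s++;
        return h;
}

static struct symbol *find_symbol(const char *name)
{
        unsigned long h = crc32(name) % HASH_BUCKETS;

        for (struct symbol *sym = symtab[h]; sym; sym = sym->hash_next)
                if (!strcmp(sym->name, name))
                        return sym;
        return NULL;
}

static void add_symbol(struct symbol *sym)
{
        unsigned long h = crc32(sym->name) % HASH_BUCKETS;

        sym->hash_next = symtab[h];     /* push onto the chain head */
        symtab[h] = sym;
}

int main(void)
{
        struct symbol s = { "init_task", NULL };

        add_symbol(&s);
        printf("%s\n", find_symbol("init_task") ? "found" : "missing");
        return 0;
}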
security/keys/dh.c unsigned int h = crypto_shash_digestsize(desc->tfm); h 161 security/keys/dh.c if (zlen && h) { h 187 security/keys/dh.c dlen -= h; h 188 security/keys/dh.c dst += h; h 69 security/selinux/ss/avtab.c avtab_insert_node(struct avtab *h, int hvalue, h 96 security/selinux/ss/avtab.c struct avtab_node **n = &h->htable[hvalue]; h 102 security/selinux/ss/avtab.c h->nel++; h 106 security/selinux/ss/avtab.c static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum) h 112 security/selinux/ss/avtab.c if (!h) h 115 security/selinux/ss/avtab.c hvalue = avtab_hash(key, h->mask); h 116 security/selinux/ss/avtab.c for (prev = NULL, cur = h->htable[hvalue]; h 139 security/selinux/ss/avtab.c newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum); h 151 security/selinux/ss/avtab.c avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum) h 157 security/selinux/ss/avtab.c if (!h) h 159 security/selinux/ss/avtab.c hvalue = avtab_hash(key, h->mask); h 160 security/selinux/ss/avtab.c for (prev = NULL, cur = h->htable[hvalue]; h 178 security/selinux/ss/avtab.c return avtab_insert_node(h, hvalue, prev, cur, key, datum); h 181 security/selinux/ss/avtab.c struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key) h 187 security/selinux/ss/avtab.c if (!h) h 190 security/selinux/ss/avtab.c hvalue = avtab_hash(key, h->mask); h 191 security/selinux/ss/avtab.c for (cur = h->htable[hvalue]; cur; h 217 security/selinux/ss/avtab.c avtab_search_node(struct avtab *h, struct avtab_key *key) h 223 security/selinux/ss/avtab.c if (!h) h 226 security/selinux/ss/avtab.c hvalue = avtab_hash(key, h->mask); h 227 security/selinux/ss/avtab.c for (cur = h->htable[hvalue]; cur; h 277 security/selinux/ss/avtab.c void avtab_destroy(struct avtab *h) h 282 security/selinux/ss/avtab.c if (!h) h 285 security/selinux/ss/avtab.c for (i = 0; i < h->nslot; i++) { h 286 security/selinux/ss/avtab.c cur = h->htable[i]; h 296 security/selinux/ss/avtab.c kvfree(h->htable); h 297 security/selinux/ss/avtab.c h->htable = NULL; h 298 security/selinux/ss/avtab.c h->nslot = 0; h 299 security/selinux/ss/avtab.c h->mask = 0; h 302 security/selinux/ss/avtab.c int avtab_init(struct avtab *h) h 304 security/selinux/ss/avtab.c kvfree(h->htable); h 305 security/selinux/ss/avtab.c h->htable = NULL; h 306 security/selinux/ss/avtab.c h->nel = 0; h 310 security/selinux/ss/avtab.c int avtab_alloc(struct avtab *h, u32 nrules) h 331 security/selinux/ss/avtab.c h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL); h 332 security/selinux/ss/avtab.c if (!h->htable) h 336 security/selinux/ss/avtab.c h->nel = 0; h 337 security/selinux/ss/avtab.c h->nslot = nslot; h 338 security/selinux/ss/avtab.c h->mask = mask; h 340 security/selinux/ss/avtab.c h->nslot, nrules); h 344 security/selinux/ss/avtab.c void avtab_hash_eval(struct avtab *h, char *tag) h 353 security/selinux/ss/avtab.c for (i = 0; i < h->nslot; i++) { h 354 security/selinux/ss/avtab.c cur = h->htable[i]; h 371 security/selinux/ss/avtab.c tag, h->nel, slots_used, h->nslot, max_chain_len, h 92 security/selinux/ss/avtab.h struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k); h 93 security/selinux/ss/avtab.h void avtab_destroy(struct avtab *h); h 94 security/selinux/ss/avtab.h void avtab_hash_eval(struct avtab *h, char *tag); h 106 security/selinux/ss/avtab.h struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, h 109 security/selinux/ss/avtab.h struct avtab_node 
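avtab_alloc() in the entries above sizes the table from the rule count and stores both nslot and mask. The exact sizing policy is not visible in the listing, so the sketch below shows only the usual derivation it implies: round up to a power of two so that mask = nslot - 1 can replace a modulo.

#include <stdio.h>

static unsigned int roundup_pow2(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int nrules = 3000;
        unsigned int nslot = roundup_pow2(nrules);  /* illustrative policy */
        unsigned int mask = nslot - 1;

        printf("%u rules -> %u slots, mask 0x%x\n", nrules, nslot, mask);
        printf("bucket for hash 0xdeadbeef: %u\n", 0xdeadbeefu & mask);
        return 0;
}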
*avtab_search_node(struct avtab *h, struct avtab_key *key); h 216 security/selinux/ss/conditional.c int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp) h 251 security/selinux/ss/conditional.c rc = hashtab_insert(h, key, booldatum); h 69 security/selinux/ss/conditional.h int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp); h 15 security/selinux/ss/hashtab.c struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key), h 16 security/selinux/ss/hashtab.c int (*keycmp)(struct hashtab *h, const void *key1, const void *key2), h 42 security/selinux/ss/hashtab.c int hashtab_insert(struct hashtab *h, void *key, void *datum) h 49 security/selinux/ss/hashtab.c if (!h || h->nel == HASHTAB_MAX_NODES) h 52 security/selinux/ss/hashtab.c hvalue = h->hash_value(h, key); h 54 security/selinux/ss/hashtab.c cur = h->htable[hvalue]; h 55 security/selinux/ss/hashtab.c while (cur && h->keycmp(h, key, cur->key) > 0) { h 60 security/selinux/ss/hashtab.c if (cur && (h->keycmp(h, key, cur->key) == 0)) h 72 security/selinux/ss/hashtab.c newnode->next = h->htable[hvalue]; h 73 security/selinux/ss/hashtab.c h->htable[hvalue] = newnode; h 76 security/selinux/ss/hashtab.c h->nel++; h 80 security/selinux/ss/hashtab.c void *hashtab_search(struct hashtab *h, const void *key) h 85 security/selinux/ss/hashtab.c if (!h) h 88 security/selinux/ss/hashtab.c hvalue = h->hash_value(h, key); h 89 security/selinux/ss/hashtab.c cur = h->htable[hvalue]; h 90 security/selinux/ss/hashtab.c while (cur && h->keycmp(h, key, cur->key) > 0) h 93 security/selinux/ss/hashtab.c if (!cur || (h->keycmp(h, key, cur->key) != 0)) h 99 security/selinux/ss/hashtab.c void hashtab_destroy(struct hashtab *h) h 104 security/selinux/ss/hashtab.c if (!h) h 107 security/selinux/ss/hashtab.c for (i = 0; i < h->size; i++) { h 108 security/selinux/ss/hashtab.c cur = h->htable[i]; h 114 security/selinux/ss/hashtab.c h->htable[i] = NULL; h 117 security/selinux/ss/hashtab.c kfree(h->htable); h 118 security/selinux/ss/hashtab.c h->htable = NULL; h 120 security/selinux/ss/hashtab.c kfree(h); h 123 security/selinux/ss/hashtab.c int hashtab_map(struct hashtab *h, h 131 security/selinux/ss/hashtab.c if (!h) h 134 security/selinux/ss/hashtab.c for (i = 0; i < h->size; i++) { h 135 security/selinux/ss/hashtab.c cur = h->htable[i]; h 147 security/selinux/ss/hashtab.c void hashtab_stat(struct hashtab *h, struct hashtab_info *info) h 154 security/selinux/ss/hashtab.c for (i = 0; i < h->size; i++) { h 155 security/selinux/ss/hashtab.c cur = h->htable[i]; h 26 security/selinux/ss/hashtab.h u32 (*hash_value)(struct hashtab *h, const void *key); h 28 security/selinux/ss/hashtab.h int (*keycmp)(struct hashtab *h, const void *key1, const void *key2); h 43 security/selinux/ss/hashtab.h struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key), h 44 security/selinux/ss/hashtab.h int (*keycmp)(struct hashtab *h, const void *key1, const void *key2), h 55 security/selinux/ss/hashtab.h int hashtab_insert(struct hashtab *h, void *k, void *d); h 63 security/selinux/ss/hashtab.h void *hashtab_search(struct hashtab *h, const void *k); h 68 security/selinux/ss/hashtab.h void hashtab_destroy(struct hashtab *h); h 81 security/selinux/ss/hashtab.h int hashtab_map(struct hashtab *h, h 86 security/selinux/ss/hashtab.h void hashtab_stat(struct hashtab *h, struct hashtab_info *info); h 402 security/selinux/ss/policydb.c static u32 filenametr_hash(struct hashtab *h, const void *k) h 414 security/selinux/ss/policydb.c 
return hash & (h->size - 1); h 417 security/selinux/ss/policydb.c static int filenametr_cmp(struct hashtab *h, const void *k1, const void *k2) h 439 security/selinux/ss/policydb.c static u32 rangetr_hash(struct hashtab *h, const void *k) h 444 security/selinux/ss/policydb.c (key->target_class << 5)) & (h->size - 1); h 447 security/selinux/ss/policydb.c static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2) h 664 security/selinux/ss/policydb.c static void hash_eval(struct hashtab *h, const char *hash_name) h 668 security/selinux/ss/policydb.c hashtab_stat(h, &info); h 670 security/selinux/ss/policydb.c hash_name, h->nel, info.slots_used, h->size, h 683 security/selinux/ss/policydb.c static inline void hash_eval(struct hashtab *h, char *hash_name) h 1082 security/selinux/ss/policydb.c static int perm_read(struct policydb *p, struct hashtab *h, void *fp) h 1105 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, perdatum); h 1115 security/selinux/ss/policydb.c static int common_read(struct policydb *p, struct hashtab *h, void *fp) h 1150 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, comdatum); h 1280 security/selinux/ss/policydb.c static int class_read(struct policydb *p, struct hashtab *h, void *fp) h 1364 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, cladatum); h 1374 security/selinux/ss/policydb.c static int role_read(struct policydb *p, struct hashtab *h, void *fp) h 1421 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, role); h 1430 security/selinux/ss/policydb.c static int type_read(struct policydb *p, struct hashtab *h, void *fp) h 1468 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, typdatum); h 1504 security/selinux/ss/policydb.c static int user_read(struct policydb *p, struct hashtab *h, void *fp) h 1545 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, usrdatum); h 1554 security/selinux/ss/policydb.c static int sens_read(struct policydb *p, struct hashtab *h, void *fp) h 1586 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, levdatum); h 1595 security/selinux/ss/policydb.c static int cat_read(struct policydb *p, struct hashtab *h, void *fp) h 1619 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, catdatum); h 1628 security/selinux/ss/policydb.c static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp) = h 12 security/selinux/ss/symtab.c static unsigned int symhash(struct hashtab *h, const void *key) h 23 security/selinux/ss/symtab.c return val & (h->size - 1); h 26 security/selinux/ss/symtab.c static int symcmp(struct hashtab *h, const void *key1, const void *key2) h 40 sound/drivers/vx/vx_mixer.c u16 h; h 52 sound/drivers/vx/vx_mixer.c u16 h; h 95 sound/parisc/harmony.c harmony_read(struct snd_harmony *h, unsigned r) h 97 sound/parisc/harmony.c return __raw_readl(h->iobase + r); h 101 sound/parisc/harmony.c harmony_write(struct snd_harmony *h, unsigned r, unsigned long v) h 103 sound/parisc/harmony.c __raw_writel(v, h->iobase + r); h 107 sound/parisc/harmony.c harmony_wait_for_control(struct snd_harmony *h) h 109 sound/parisc/harmony.c while (harmony_read(h, HARMONY_CNTL) & HARMONY_CNTL_C) ; h 113 sound/parisc/harmony.c harmony_reset(struct snd_harmony *h) h 115 sound/parisc/harmony.c harmony_write(h, HARMONY_RESET, 1); h 117 sound/parisc/harmony.c harmony_write(h, HARMONY_RESET, 0); h 121 sound/parisc/harmony.c harmony_disable_interrupts(struct snd_harmony *h) h 124 sound/parisc/harmony.c harmony_wait_for_control(h); h 125 sound/parisc/harmony.c dstatus = 
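The selinux hashtab entries above keep every chain sorted by keycmp(), so insert and search both stop at the first node that compares greater than the key. A standalone sketch with string keys and strcmp() standing in for keycmp().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
        const char *key;
        struct node *next;
};

#define NSLOT 8

static struct node *htable[NSLOT];

static unsigned int hashv(const char *key)
{
        unsigned int h = 0;

        while (*key)
                h = h * 31 + (unsigned char)*key++;
        return h & (NSLOT - 1);
}

static int insert(const char *key)
{
        struct node **link = &htable[hashv(key)], *cur = *link;

        while (cur && strcmp(key, cur->key) > 0) {  /* chain kept sorted */
                link = &cur->next;
                cur = cur->next;
        }
        if (cur && strcmp(key, cur->key) == 0)
                return -1;                           /* duplicate key */

        struct node *n = malloc(sizeof(*n));

        if (!n)
                return -1;
        n->key = key;
        n->next = cur;
        *link = n;
        return 0;
}

static int search(const char *key)
{
        struct node *cur = htable[hashv(key)];

        while (cur && strcmp(key, cur->key) > 0)     /* early stop on '<' */
                cur = cur->next;
        return cur && strcmp(key, cur->key) == 0;
}

int main(void)
{
        insert("read");
        insert("write");
        printf("read: %d, exec: %d\n", search("read"), search("exec"));
        return 0;
}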
harmony_read(h, HARMONY_DSTATUS); h 127 sound/parisc/harmony.c harmony_write(h, HARMONY_DSTATUS, dstatus); h 131 sound/parisc/harmony.c harmony_enable_interrupts(struct snd_harmony *h) h 134 sound/parisc/harmony.c harmony_wait_for_control(h); h 135 sound/parisc/harmony.c dstatus = harmony_read(h, HARMONY_DSTATUS); h 137 sound/parisc/harmony.c harmony_write(h, HARMONY_DSTATUS, dstatus); h 141 sound/parisc/harmony.c harmony_mute(struct snd_harmony *h) h 145 sound/parisc/harmony.c spin_lock_irqsave(&h->mixer_lock, flags); h 146 sound/parisc/harmony.c harmony_wait_for_control(h); h 147 sound/parisc/harmony.c harmony_write(h, HARMONY_GAINCTL, HARMONY_GAIN_SILENCE); h 148 sound/parisc/harmony.c spin_unlock_irqrestore(&h->mixer_lock, flags); h 152 sound/parisc/harmony.c harmony_unmute(struct snd_harmony *h) h 156 sound/parisc/harmony.c spin_lock_irqsave(&h->mixer_lock, flags); h 157 sound/parisc/harmony.c harmony_wait_for_control(h); h 158 sound/parisc/harmony.c harmony_write(h, HARMONY_GAINCTL, h->st.gain); h 159 sound/parisc/harmony.c spin_unlock_irqrestore(&h->mixer_lock, flags); h 163 sound/parisc/harmony.c harmony_set_control(struct snd_harmony *h) h 168 sound/parisc/harmony.c spin_lock_irqsave(&h->lock, flags); h 171 sound/parisc/harmony.c (h->st.format << 6) | h 172 sound/parisc/harmony.c (h->st.stereo << 5) | h 173 sound/parisc/harmony.c (h->st.rate)); h 175 sound/parisc/harmony.c harmony_wait_for_control(h); h 176 sound/parisc/harmony.c harmony_write(h, HARMONY_CNTL, ctrl); h 178 sound/parisc/harmony.c spin_unlock_irqrestore(&h->lock, flags); h 185 sound/parisc/harmony.c struct snd_harmony *h = dev; h 187 sound/parisc/harmony.c spin_lock(&h->lock); h 188 sound/parisc/harmony.c harmony_disable_interrupts(h); h 189 sound/parisc/harmony.c harmony_wait_for_control(h); h 190 sound/parisc/harmony.c dstatus = harmony_read(h, HARMONY_DSTATUS); h 191 sound/parisc/harmony.c spin_unlock(&h->lock); h 194 sound/parisc/harmony.c if (h->psubs && h->st.playing) { h 195 sound/parisc/harmony.c spin_lock(&h->lock); h 196 sound/parisc/harmony.c h->pbuf.buf += h->pbuf.count; /* PAGE_SIZE */ h 197 sound/parisc/harmony.c h->pbuf.buf %= h->pbuf.size; /* MAX_BUFS*PAGE_SIZE */ h 199 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD, h 200 sound/parisc/harmony.c h->pbuf.addr + h->pbuf.buf); h 201 sound/parisc/harmony.c h->stats.play_intr++; h 202 sound/parisc/harmony.c spin_unlock(&h->lock); h 203 sound/parisc/harmony.c snd_pcm_period_elapsed(h->psubs); h 205 sound/parisc/harmony.c spin_lock(&h->lock); h 206 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD, h->sdma.addr); h 207 sound/parisc/harmony.c h->stats.silence_intr++; h 208 sound/parisc/harmony.c spin_unlock(&h->lock); h 213 sound/parisc/harmony.c if (h->csubs && h->st.capturing) { h 214 sound/parisc/harmony.c spin_lock(&h->lock); h 215 sound/parisc/harmony.c h->cbuf.buf += h->cbuf.count; h 216 sound/parisc/harmony.c h->cbuf.buf %= h->cbuf.size; h 218 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD, h 219 sound/parisc/harmony.c h->cbuf.addr + h->cbuf.buf); h 220 sound/parisc/harmony.c h->stats.rec_intr++; h 221 sound/parisc/harmony.c spin_unlock(&h->lock); h 222 sound/parisc/harmony.c snd_pcm_period_elapsed(h->csubs); h 224 sound/parisc/harmony.c spin_lock(&h->lock); h 225 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD, h->gdma.addr); h 226 sound/parisc/harmony.c h->stats.graveyard_intr++; h 227 sound/parisc/harmony.c spin_unlock(&h->lock); h 231 sound/parisc/harmony.c spin_lock(&h->lock); h 232 sound/parisc/harmony.c 
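The harmony interrupt handler above advances its playback window by one period and wraps it modulo the buffer size before programming the next DMA address. A sketch of that circular advance; the register write is replaced by a printf and the sizes are arbitrary.

#include <stdio.h>

struct ring {
        unsigned long addr;     /* DMA base address */
        unsigned int buf;       /* current offset into the buffer */
        unsigned int count;     /* period size */
        unsigned int size;      /* whole buffer size (multiple of count) */
};

static void advance(struct ring *r)
{
        r->buf += r->count;
        r->buf %= r->size;                       /* wrap at the end */
        printf("next DMA address: 0x%lx\n", r->addr + r->buf);
}

int main(void)
{
        struct ring r = { 0x10000, 0, 4096, 16384 };

        for (int i = 0; i < 5; i++)              /* wraps after 4 periods */
                advance(&r);
        return 0;
}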
harmony_enable_interrupts(h); h 233 sound/parisc/harmony.c spin_unlock(&h->lock); h 295 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 297 sound/parisc/harmony.c if (h->st.capturing) h 300 sound/parisc/harmony.c spin_lock(&h->lock); h 303 sound/parisc/harmony.c h->st.playing = 1; h 304 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD, h->pbuf.addr); h 305 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD, h->gdma.addr); h 306 sound/parisc/harmony.c harmony_unmute(h); h 307 sound/parisc/harmony.c harmony_enable_interrupts(h); h 310 sound/parisc/harmony.c h->st.playing = 0; h 311 sound/parisc/harmony.c harmony_mute(h); h 312 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD, h->sdma.addr); h 313 sound/parisc/harmony.c harmony_disable_interrupts(h); h 319 sound/parisc/harmony.c spin_unlock(&h->lock); h 323 sound/parisc/harmony.c spin_unlock(&h->lock); h 331 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 333 sound/parisc/harmony.c if (h->st.playing) h 336 sound/parisc/harmony.c spin_lock(&h->lock); h 339 sound/parisc/harmony.c h->st.capturing = 1; h 340 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD, h->sdma.addr); h 341 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD, h->cbuf.addr); h 342 sound/parisc/harmony.c harmony_unmute(h); h 343 sound/parisc/harmony.c harmony_enable_interrupts(h); h 346 sound/parisc/harmony.c h->st.capturing = 0; h 347 sound/parisc/harmony.c harmony_mute(h); h 348 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD, h->gdma.addr); h 349 sound/parisc/harmony.c harmony_disable_interrupts(h); h 355 sound/parisc/harmony.c spin_unlock(&h->lock); h 359 sound/parisc/harmony.c spin_unlock(&h->lock); h 365 sound/parisc/harmony.c snd_harmony_set_data_format(struct snd_harmony *h, int fmt, int force) h 367 sound/parisc/harmony.c int o = h->st.format; h 386 sound/parisc/harmony.c snd_pcm_format_set_silence(fmt, h->sdma.area, SILENCE_BUFSZ / h 397 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 400 sound/parisc/harmony.c if (h->st.capturing) h 403 sound/parisc/harmony.c h->pbuf.size = snd_pcm_lib_buffer_bytes(ss); h 404 sound/parisc/harmony.c h->pbuf.count = snd_pcm_lib_period_bytes(ss); h 405 sound/parisc/harmony.c if (h->pbuf.buf >= h->pbuf.size) h 406 sound/parisc/harmony.c h->pbuf.buf = 0; h 407 sound/parisc/harmony.c h->st.playing = 0; h 409 sound/parisc/harmony.c h->st.rate = snd_harmony_rate_bits(rt->rate); h 410 sound/parisc/harmony.c h->st.format = snd_harmony_set_data_format(h, rt->format, 0); h 413 sound/parisc/harmony.c h->st.stereo = HARMONY_SS_STEREO; h 415 sound/parisc/harmony.c h->st.stereo = HARMONY_SS_MONO; h 417 sound/parisc/harmony.c harmony_set_control(h); h 419 sound/parisc/harmony.c h->pbuf.addr = rt->dma_addr; h 427 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 430 sound/parisc/harmony.c if (h->st.playing) h 433 sound/parisc/harmony.c h->cbuf.size = snd_pcm_lib_buffer_bytes(ss); h 434 sound/parisc/harmony.c h->cbuf.count = snd_pcm_lib_period_bytes(ss); h 435 sound/parisc/harmony.c if (h->cbuf.buf >= h->cbuf.size) h 436 sound/parisc/harmony.c h->cbuf.buf = 0; h 437 sound/parisc/harmony.c h->st.capturing = 0; h 439 sound/parisc/harmony.c h->st.rate = snd_harmony_rate_bits(rt->rate); h 440 sound/parisc/harmony.c h->st.format = snd_harmony_set_data_format(h, rt->format, 0); h 443 sound/parisc/harmony.c h->st.stereo = HARMONY_SS_STEREO; h 445 sound/parisc/harmony.c h->st.stereo = HARMONY_SS_MONO; h 447 
sound/parisc/harmony.c harmony_set_control(h); h 449 sound/parisc/harmony.c h->cbuf.addr = rt->dma_addr; h 458 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 462 sound/parisc/harmony.c if (!(h->st.playing) || (h->psubs == NULL)) h 465 sound/parisc/harmony.c if ((h->pbuf.addr == 0) || (h->pbuf.size == 0)) h 468 sound/parisc/harmony.c pcuradd = harmony_read(h, HARMONY_PCURADD); h 469 sound/parisc/harmony.c played = pcuradd - h->pbuf.addr; h 473 sound/parisc/harmony.c pcuradd, h->pbuf.addr, played); h 476 sound/parisc/harmony.c if (pcuradd > h->pbuf.addr + h->pbuf.size) { h 487 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 491 sound/parisc/harmony.c if (!(h->st.capturing) || (h->csubs == NULL)) h 494 sound/parisc/harmony.c if ((h->cbuf.addr == 0) || (h->cbuf.size == 0)) h 497 sound/parisc/harmony.c rcuradd = harmony_read(h, HARMONY_RCURADD); h 498 sound/parisc/harmony.c caught = rcuradd - h->cbuf.addr; h 502 sound/parisc/harmony.c rcuradd, h->cbuf.addr, caught); h 505 sound/parisc/harmony.c if (rcuradd > h->cbuf.addr + h->cbuf.size) { h 515 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 519 sound/parisc/harmony.c h->psubs = ss; h 534 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 538 sound/parisc/harmony.c h->csubs = ss; h 553 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 554 sound/parisc/harmony.c h->psubs = NULL; h 561 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 562 sound/parisc/harmony.c h->csubs = NULL; h 571 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss); h 574 sound/parisc/harmony.c if (err > 0 && h->dma.type == SNDRV_DMA_TYPE_CONTINUOUS) h 609 sound/parisc/harmony.c snd_harmony_pcm_init(struct snd_harmony *h) h 614 sound/parisc/harmony.c if (snd_BUG_ON(!h)) h 617 sound/parisc/harmony.c harmony_disable_interrupts(h); h 619 sound/parisc/harmony.c err = snd_pcm_new(h->card, "harmony", 0, 1, 1, &pcm); h 628 sound/parisc/harmony.c pcm->private_data = h; h 631 sound/parisc/harmony.c h->pcm = pcm; h 633 sound/parisc/harmony.c h->psubs = NULL; h 634 sound/parisc/harmony.c h->csubs = NULL; h 637 sound/parisc/harmony.c h->dma.type = SNDRV_DMA_TYPE_DEV; h 638 sound/parisc/harmony.c h->dma.dev = &h->dev->dev; h 639 sound/parisc/harmony.c err = snd_dma_alloc_pages(h->dma.type, h 640 sound/parisc/harmony.c h->dma.dev, h 642 sound/parisc/harmony.c &h->gdma); h 649 sound/parisc/harmony.c err = snd_dma_alloc_pages(h->dma.type, h 650 sound/parisc/harmony.c h->dma.dev, h 652 sound/parisc/harmony.c &h->sdma); h 659 sound/parisc/harmony.c snd_pcm_lib_preallocate_pages_for_all(pcm, h->dma.type, h->dma.dev, h 662 sound/parisc/harmony.c h->st.format = snd_harmony_set_data_format(h, h 669 sound/parisc/harmony.c snd_harmony_set_new_gain(struct snd_harmony *h) h 671 sound/parisc/harmony.c harmony_wait_for_control(h); h 672 sound/parisc/harmony.c harmony_write(h, HARMONY_GAINCTL, h->st.gain); h 696 sound/parisc/harmony.c struct snd_harmony *h = snd_kcontrol_chip(kc); h 703 sound/parisc/harmony.c spin_lock_irq(&h->mixer_lock); h 705 sound/parisc/harmony.c left = (h->st.gain >> shift_left) & mask; h 706 sound/parisc/harmony.c right = (h->st.gain >> shift_right) & mask; h 716 sound/parisc/harmony.c spin_unlock_irq(&h->mixer_lock); h 725 sound/parisc/harmony.c struct snd_harmony *h = snd_kcontrol_chip(kc); h 731 sound/parisc/harmony.c int old_gain = h->st.gain; h 733 sound/parisc/harmony.c 
spin_lock_irq(&h->mixer_lock); h 738 sound/parisc/harmony.c h->st.gain &= ~( (mask << shift_left ) ); h 739 sound/parisc/harmony.c h->st.gain |= (left << shift_left); h 745 sound/parisc/harmony.c h->st.gain &= ~( (mask << shift_right) ); h 746 sound/parisc/harmony.c h->st.gain |= (right << shift_right); h 749 sound/parisc/harmony.c snd_harmony_set_new_gain(h); h 751 sound/parisc/harmony.c spin_unlock_irq(&h->mixer_lock); h 753 sound/parisc/harmony.c return h->st.gain != old_gain; h 769 sound/parisc/harmony.c struct snd_harmony *h = snd_kcontrol_chip(kc); h 772 sound/parisc/harmony.c spin_lock_irq(&h->mixer_lock); h 774 sound/parisc/harmony.c value = (h->st.gain >> HARMONY_GAIN_IS_SHIFT) & 1; h 777 sound/parisc/harmony.c spin_unlock_irq(&h->mixer_lock); h 786 sound/parisc/harmony.c struct snd_harmony *h = snd_kcontrol_chip(kc); h 788 sound/parisc/harmony.c int old_gain = h->st.gain; h 790 sound/parisc/harmony.c spin_lock_irq(&h->mixer_lock); h 793 sound/parisc/harmony.c h->st.gain &= ~HARMONY_GAIN_IS_MASK; h 794 sound/parisc/harmony.c h->st.gain |= value << HARMONY_GAIN_IS_SHIFT; h 796 sound/parisc/harmony.c snd_harmony_set_new_gain(h); h 798 sound/parisc/harmony.c spin_unlock_irq(&h->mixer_lock); h 800 sound/parisc/harmony.c return h->st.gain != old_gain; h 835 sound/parisc/harmony.c snd_harmony_mixer_reset(struct snd_harmony *h) h 837 sound/parisc/harmony.c harmony_mute(h); h 838 sound/parisc/harmony.c harmony_reset(h); h 839 sound/parisc/harmony.c h->st.gain = HARMONY_GAIN_DEFAULT; h 840 sound/parisc/harmony.c harmony_unmute(h); h 844 sound/parisc/harmony.c snd_harmony_mixer_init(struct snd_harmony *h) h 849 sound/parisc/harmony.c if (snd_BUG_ON(!h)) h 851 sound/parisc/harmony.c card = h->card; h 856 sound/parisc/harmony.c snd_ctl_new1(&snd_harmony_controls[idx], h)); h 861 sound/parisc/harmony.c snd_harmony_mixer_reset(h); h 867 sound/parisc/harmony.c snd_harmony_free(struct snd_harmony *h) h 869 sound/parisc/harmony.c if (h->gdma.addr) h 870 sound/parisc/harmony.c snd_dma_free_pages(&h->gdma); h 871 sound/parisc/harmony.c if (h->sdma.addr) h 872 sound/parisc/harmony.c snd_dma_free_pages(&h->sdma); h 874 sound/parisc/harmony.c if (h->irq >= 0) h 875 sound/parisc/harmony.c free_irq(h->irq, h); h 877 sound/parisc/harmony.c iounmap(h->iobase); h 878 sound/parisc/harmony.c kfree(h); h 885 sound/parisc/harmony.c struct snd_harmony *h = dev->device_data; h 886 sound/parisc/harmony.c return snd_harmony_free(h); h 895 sound/parisc/harmony.c struct snd_harmony *h; h 902 sound/parisc/harmony.c h = kzalloc(sizeof(*h), GFP_KERNEL); h 903 sound/parisc/harmony.c if (h == NULL) h 906 sound/parisc/harmony.c h->hpa = padev->hpa.start; h 907 sound/parisc/harmony.c h->card = card; h 908 sound/parisc/harmony.c h->dev = padev; h 909 sound/parisc/harmony.c h->irq = -1; h 910 sound/parisc/harmony.c h->iobase = ioremap_nocache(padev->hpa.start, HARMONY_SIZE); h 911 sound/parisc/harmony.c if (h->iobase == NULL) { h 919 sound/parisc/harmony.c "harmony", h); h 925 sound/parisc/harmony.c h->irq = padev->irq; h 927 sound/parisc/harmony.c spin_lock_init(&h->mixer_lock); h 928 sound/parisc/harmony.c spin_lock_init(&h->lock); h 931 sound/parisc/harmony.c h, &ops)) < 0) { h 935 sound/parisc/harmony.c *rchip = h; h 940 sound/parisc/harmony.c snd_harmony_free(h); h 949 sound/parisc/harmony.c struct snd_harmony *h; h 955 sound/parisc/harmony.c err = snd_harmony_create(card, padev, &h); h 959 sound/parisc/harmony.c err = snd_harmony_pcm_init(h); h 963 sound/parisc/harmony.c err = snd_harmony_mixer_init(h); h 970 
sound/parisc/harmony.c card->shortname, h->hpa, h->irq); h 1118 sound/pci/asihpi/hpi_internal.h struct hpi_message_header h; h 1122 sound/pci/asihpi/hpi_internal.h struct hpi_response_header h; /*v0 */ h 1127 sound/pci/asihpi/hpi_internal.h struct hpi_response_header h; h 1139 sound/pci/asihpi/hpi_internal.h struct hpi_message_header h; h 1145 sound/pci/asihpi/hpi_internal.h struct hpi_message_header h; h 1150 sound/pci/asihpi/hpi_internal.h struct hpi_response_header h; h 1176 sound/pci/asihpi/hpi_internal.h struct hpi_message_header h; h 1195 sound/pci/asihpi/hpi_internal.h struct hpi_response_header h; h 1215 sound/pci/asihpi/hpi_internal.h struct hpi_message_header_v1 h; h 1221 sound/pci/asihpi/hpi_internal.h struct hpi_response_header_v1 h; h 17 sound/pci/asihpi/hpifunc.c struct hpi_handle h; h 26 sound/pci/asihpi/hpifunc.c handle.h.adapter_index = adapter_index; h 27 sound/pci/asihpi/hpifunc.c handle.h.spare = 0; h 28 sound/pci/asihpi/hpifunc.c handle.h.read_only = 0; h 29 sound/pci/asihpi/hpifunc.c handle.h.obj_type = c_object; h 30 sound/pci/asihpi/hpifunc.c handle.h.obj_index = object_index; h 34 sound/pci/asihpi/hpifunc.c static u16 hpi_handle_indexes(const u32 h, u16 *p1, u16 *p2) h 37 sound/pci/asihpi/hpifunc.c if (!h) h 40 sound/pci/asihpi/hpifunc.c uhandle.w = h; h 42 sound/pci/asihpi/hpifunc.c *p1 = (u16)uhandle.h.adapter_index; h 44 sound/pci/asihpi/hpifunc.c *p2 = (u16)uhandle.h.obj_index; h 59 sound/pci/asihpi/hpifunc.c return (char)uhandle.h.obj_type; h 1365 sound/pci/asihpi/hpifunc.c #define hpi_control_param1_get(h, a, p1) \ h 1366 sound/pci/asihpi/hpifunc.c hpi_control_param_get(h, a, 0, 0, p1, NULL) h 1367 sound/pci/asihpi/hpifunc.c #define hpi_control_param2_get(h, a, p1, p2) \ h 1368 sound/pci/asihpi/hpifunc.c hpi_control_param_get(h, a, 0, 0, p1, p2) h 1670 sound/pci/asihpi/hpifunc.c hpi_init_message_responseV1(&hm.h, sizeof(hm), &hr, sizeof(hr), h 1673 sound/pci/asihpi/hpifunc.c if (hpi_handle_indexes(h_control, &hm.h.adapter_index, h 1674 sound/pci/asihpi/hpifunc.c &hm.h.obj_index)) h 1684 sound/pci/asihpi/hpifunc.c hm.h.size = (u16)(sizeof(hm.h) + sizeof(hm.p) + byte_count); h 1686 sound/pci/asihpi/hpifunc.c hpi_send_recvV1(&hm.h, &hr); h 1696 sound/pci/asihpi/hpifunc.c hpi_init_message_responseV1(&hm.h, sizeof(hm), &hr.h, sizeof(hr), h 1699 sound/pci/asihpi/hpifunc.c if (hpi_handle_indexes(h_control, &hm.h.adapter_index, h 1700 sound/pci/asihpi/hpifunc.c &hm.h.obj_index)) h 1710 sound/pci/asihpi/hpifunc.c hpi_send_recvV1(&hm.h, &hr.h); h 1712 sound/pci/asihpi/hpifunc.c if (!hr.h.error && pb_data) { h 1724 sound/pci/asihpi/hpifunc.c return hr.h.error; h 97 sound/pci/asihpi/hpimsgx.c struct hpi_response_header h; h 102 sound/pci/asihpi/hpimsgx.c struct hpi_response_header h; h 107 sound/pci/asihpi/hpimsgx.c struct hpi_response_header h; h 112 sound/pci/asihpi/hpimsgx.c struct hpi_response_header h; h 395 sound/pci/asihpi/hpimsgx.c [phm->obj_index].h.error) h 492 sound/pci/asihpi/hpimsgx.c [phm->obj_index].h.error) h 679 sound/pci/asihpi/hpimsgx.c rESP_HPI_ADAPTER_OPEN[adapter_index].h.error = h 681 sound/pci/asihpi/hpimsgx.c rESP_HPI_MIXER_OPEN[adapter_index].h.error = h 684 sound/pci/asihpi/hpimsgx.c rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error = h 686 sound/pci/asihpi/hpimsgx.c rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error = h 142 sound/pci/asihpi/hpioctl.c hm->h.size = msg_size; h 157 sound/pci/asihpi/hpioctl.c switch (hm->h.function) { h 161 sound/pci/asihpi/hpioctl.c hr->h.size = sizeof(hr->h); h 162 sound/pci/asihpi/hpioctl.c hr->h.error = 
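The hpifunc.c entries above pack adapter and object indexes into a single 32-bit handle through a union of a bitfield struct and a plain word. A sketch of that pack/unpack round trip; the field widths below are illustrative guesses, not the documented HPI layout.

#include <stdint.h>
#include <stdio.h>

union handle_word {
        struct {                                /* widths sum to 32 bits */
                unsigned int adapter_index : 12;
                unsigned int spare         : 3;
                unsigned int read_only     : 1;
                unsigned int obj_type      : 4;
                unsigned int obj_index     : 12;
        } h;
        uint32_t w;
};

int main(void)
{
        union handle_word handle = { .h = {
                .adapter_index = 2, .obj_type = 5, .obj_index = 9 } };

        printf("packed handle: 0x%08x\n", handle.w);

        union handle_word u = { .w = handle.w };   /* unpack */
        printf("adapter %u, obj_type %u, obj_index %u\n",
               u.h.adapter_index, u.h.obj_type, u.h.obj_index);
        return 0;
}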
HPI_ERROR_INVALID_OPERATION; h 163 sound/pci/asihpi/hpioctl.c hr->h.function = hm->h.function; h 164 sound/pci/asihpi/hpioctl.c uncopied_bytes = copy_to_user(puhr, hr, hr->h.size); h 172 sound/pci/asihpi/hpioctl.c hr->h.size = res_max_size; h 173 sound/pci/asihpi/hpioctl.c if (hm->h.object == HPI_OBJ_SUBSYSTEM) { h 182 sound/pci/asihpi/hpioctl.c if (hm->h.adapter_index < ARRAY_SIZE(adapters)) h 183 sound/pci/asihpi/hpioctl.c pa = &adapters[array_index_nospec(hm->h.adapter_index, h 187 sound/pci/asihpi/hpioctl.c hpi_init_response(&hr->r0, hm->h.object, h 188 sound/pci/asihpi/hpioctl.c hm->h.function, HPI_ERROR_BAD_ADAPTER_NUMBER); h 191 sound/pci/asihpi/hpioctl.c copy_to_user(puhr, hr, sizeof(hr->h)); h 205 sound/pci/asihpi/hpioctl.c switch (hm->h.function) { h 220 sound/pci/asihpi/hpioctl.c hm->h.adapter_index, h 242 sound/pci/asihpi/hpioctl.c if (hm->h.function == HPI_ISTREAM_READ) h 282 sound/pci/asihpi/hpioctl.c if (!hr->h.size) { h 288 sound/pci/asihpi/hpioctl.c if (hr->h.size > res_max_size) { h 289 sound/pci/asihpi/hpioctl.c HPI_DEBUG_LOG(ERROR, "response too big %d %d\n", hr->h.size, h 291 sound/pci/asihpi/hpioctl.c hr->h.error = HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL; h 292 sound/pci/asihpi/hpioctl.c hr->h.specific_error = hr->h.size; h 293 sound/pci/asihpi/hpioctl.c hr->h.size = sizeof(hr->h); h 296 sound/pci/asihpi/hpioctl.c uncopied_bytes = copy_to_user(puhr, hr, hr->h.size); h 990 sound/pci/oxygen/xonar_wm87x6.c #define WM8776_FIELD_CTL_VOLUME(a, b, c, d, e, f, g, h, tlv_p) { \ h 991 sound/pci/oxygen/xonar_wm87x6.c _WM8776_FIELD_CTL(a " Capture Volume", b, c, d, e, f, g, h), \ h 251 sound/pci/riptide/riptide.c #define SEND_LSEL(p,b,c,d,e,f,g,h) sendcmd(p,PARM,LSEL|BYTE1(b)|BYTE2(c)|BYTE3(d),BYTE0(e)|BYTE1(f)|BYTE2(g)|BYTE3(h),RET(0)) /* select paths for internal connections */ h 5180 sound/soc/codecs/rt5677.c static int rt5677_irq_map(struct irq_domain *h, unsigned int virq, h 5183 sound/soc/codecs/rt5677.c struct rt5677_priv *rt5677 = h->host_data; h 21 tools/include/asm-generic/bitops/fls64.h __u32 h = x >> 32; h 22 tools/include/asm-generic/bitops/fls64.h if (h) h 23 tools/include/asm-generic/bitops/fls64.h return fls(h) + 32; h 21 tools/include/linux/bits.h #define GENMASK(h, l) \ h 23 tools/include/linux/bits.h (~UL(0) >> (BITS_PER_LONG - 1 - (h)))) h 25 tools/include/linux/bits.h #define GENMASK_ULL(h, l) \ h 27 tools/include/linux/bits.h (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) h 598 tools/include/linux/list.h static inline void INIT_HLIST_NODE(struct hlist_node *h) h 600 tools/include/linux/list.h h->next = NULL; h 601 tools/include/linux/list.h h->pprev = NULL; h 604 tools/include/linux/list.h static inline int hlist_unhashed(const struct hlist_node *h) h 606 tools/include/linux/list.h return !h->pprev; h 609 tools/include/linux/list.h static inline int hlist_empty(const struct hlist_head *h) h 611 tools/include/linux/list.h return !h->first; h 639 tools/include/linux/list.h static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) h 641 tools/include/linux/list.h struct hlist_node *first = h->first; h 645 tools/include/linux/list.h h->first = n; h 646 tools/include/linux/list.h n->pprev = &h->first; h 676 tools/include/linux/list.h static inline bool hlist_fake(struct hlist_node *h) h 678 tools/include/linux/list.h return h->pprev == &h->next; h 204 tools/include/uapi/drm/i915_drm.h unsigned int h; h 138 tools/include/uapi/linux/pkt_cls.h #define TC_U32_HTID(h) ((h)&0xFFF00000) h 139 tools/include/uapi/linux/pkt_cls.h #define TC_U32_USERHTID(h) 
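The bits.h entries above build GENMASK(h, l), a word with bits h..l set, by ANDing a low-bit clear with a high-bit clear. The same expression, specialized to unsigned long long so it builds standalone.

#include <stdio.h>

#define BITS 64
#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (BITS - 1 - (h))))

int main(void)
{
        /* bits 15..8 -> 0xff00 */
        printf("GENMASK_ULL(15, 8)  = 0x%llx\n", GENMASK_ULL(15, 8));
        /* bits 63..32 -> the high word */
        printf("GENMASK_ULL(63, 32) = 0x%llx\n", GENMASK_ULL(63, 32));
        return 0;
}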
(TC_U32_HTID(h)>>20) h 140 tools/include/uapi/linux/pkt_cls.h #define TC_U32_HASH(h) (((h)>>12)&0xFF) h 141 tools/include/uapi/linux/pkt_cls.h #define TC_U32_NODE(h) ((h)&0xFFF) h 142 tools/include/uapi/linux/pkt_cls.h #define TC_U32_KEY(h) ((h)&0xFFFFF) h 69 tools/include/uapi/linux/pkt_sched.h #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) h 70 tools/include/uapi/linux/pkt_sched.h #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) h 652 tools/include/uapi/linux/pkt_sched.h __u32 h; h 1313 tools/lib/bpf/btf.c static long hash_combine(long h, long value) h 1315 tools/lib/bpf/btf.c return h * 31 + value; h 1726 tools/lib/bpf/btf.c long h; h 1728 tools/lib/bpf/btf.c h = hash_combine(0, t->name_off); h 1729 tools/lib/bpf/btf.c h = hash_combine(h, t->info); h 1730 tools/lib/bpf/btf.c h = hash_combine(h, t->size); h 1731 tools/lib/bpf/btf.c return h; h 1745 tools/lib/bpf/btf.c long h; h 1747 tools/lib/bpf/btf.c h = btf_hash_common(t); h 1748 tools/lib/bpf/btf.c h = hash_combine(h, info); h 1749 tools/lib/bpf/btf.c return h; h 1767 tools/lib/bpf/btf.c long h; h 1770 tools/lib/bpf/btf.c h = hash_combine(0, t->name_off); h 1771 tools/lib/bpf/btf.c h = hash_combine(h, t->info & ~0xffff); h 1772 tools/lib/bpf/btf.c h = hash_combine(h, t->size); h 1773 tools/lib/bpf/btf.c return h; h 1822 tools/lib/bpf/btf.c long h = btf_hash_common(t); h 1826 tools/lib/bpf/btf.c h = hash_combine(h, member->name_off); h 1827 tools/lib/bpf/btf.c h = hash_combine(h, member->offset); h 1831 tools/lib/bpf/btf.c return h; h 1868 tools/lib/bpf/btf.c long h = btf_hash_common(t); h 1870 tools/lib/bpf/btf.c h = hash_combine(h, info->type); h 1871 tools/lib/bpf/btf.c h = hash_combine(h, info->index_type); h 1872 tools/lib/bpf/btf.c h = hash_combine(h, info->nelems); h 1873 tools/lib/bpf/btf.c return h; h 1919 tools/lib/bpf/btf.c long h = btf_hash_common(t); h 1923 tools/lib/bpf/btf.c h = hash_combine(h, member->name_off); h 1924 tools/lib/bpf/btf.c h = hash_combine(h, member->type); h 1927 tools/lib/bpf/btf.c return h; h 1999 tools/lib/bpf/btf.c long h; h 2017 tools/lib/bpf/btf.c h = btf_hash_int(t); h 2018 tools/lib/bpf/btf.c for_each_dedup_cand(d, hash_entry, h) { h 2029 tools/lib/bpf/btf.c h = btf_hash_enum(t); h 2030 tools/lib/bpf/btf.c for_each_dedup_cand(d, hash_entry, h) { h 2052 tools/lib/bpf/btf.c h = btf_hash_common(t); h 2053 tools/lib/bpf/btf.c for_each_dedup_cand(d, hash_entry, h) { h 2068 tools/lib/bpf/btf.c if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) h 2463 tools/lib/bpf/btf.c long h; h 2475 tools/lib/bpf/btf.c h = btf_hash_struct(t); h 2476 tools/lib/bpf/btf.c for_each_dedup_cand(d, hash_entry, h) { h 2506 tools/lib/bpf/btf.c if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) h 2555 tools/lib/bpf/btf.c long h; h 2577 tools/lib/bpf/btf.c h = btf_hash_common(t); h 2578 tools/lib/bpf/btf.c for_each_dedup_cand(d, hash_entry, h) { h 2601 tools/lib/bpf/btf.c h = btf_hash_array(t); h 2602 tools/lib/bpf/btf.c for_each_dedup_cand(d, hash_entry, h) { h 2633 tools/lib/bpf/btf.c h = btf_hash_fnproto(t); h 2634 tools/lib/bpf/btf.c for_each_dedup_cand(d, hash_entry, h) { h 2650 tools/lib/bpf/btf.c if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) h 91 tools/lib/bpf/btf_dump.c size_t h = 0; h 94 tools/lib/bpf/btf_dump.c h = h * 31 + *s; h 97 tools/lib/bpf/btf_dump.c return h; h 93 tools/lib/bpf/hashmap.c size_t h; h 106 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits); h 107 tools/lib/bpf/hashmap.c hashmap_add_entry(&new_buckets[h], cur); h 147 tools/lib/bpf/hashmap.c 
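The btf.c entries above fold successive type fields into one hash with h = h * 31 + value, and btf_dump hashes identifier strings the same way. A standalone sketch of that combine; the field values fed in below are arbitrary.

#include <stdio.h>

static long hash_combine(long h, long value)
{
        return h * 31 + value;
}

static long str_hash(const char *s)
{
        long h = 0;

        for (; *s; s++)
                h = h * 31 + *s;
        return h;
}

int main(void)
{
        long h = hash_combine(0, 42);        /* e.g. name_off */

        h = hash_combine(h, 7);              /* e.g. info */
        h = hash_combine(h, 128);            /* e.g. size */
        printf("type hash: %ld\n", h);
        printf("string hash: %ld\n", str_hash("task_struct"));
        return 0;
}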
h 155 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
h 157 tools/lib/bpf/hashmap.c hashmap_find_entry(map, key, h, NULL, &entry)) {
h 179 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
h 188 tools/lib/bpf/hashmap.c hashmap_add_entry(&map->buckets[h], entry);
h 197 tools/lib/bpf/hashmap.c size_t h;
h 199 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
h 200 tools/lib/bpf/hashmap.c if (!hashmap_find_entry(map, key, h, NULL, &entry))
h 212 tools/lib/bpf/hashmap.c size_t h;
h 214 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
h 215 tools/lib/bpf/hashmap.c if (!hashmap_find_entry(map, key, h, &pprev, &entry))
h 20 tools/lib/bpf/hashmap.h static inline size_t hash_bits(size_t h, int bits)
h 23 tools/lib/bpf/hashmap.h return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
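
hash_bits() in hashmap.h maps a raw hash to a bucket index by Fibonacci hashing: multiply by 2^64 divided by the golden ratio and keep the top bits. A standalone sketch assuming a 64-bit size_t (the header itself uses __WORDSIZE rather than the literal 64):

    #include <stddef.h>

    /* Fibonacci hashing as in tools/lib/bpf/hashmap.h: multiply by
     * 2^64/phi and take the top 'bits' bits as the bucket index.
     * A 64-bit size_t is assumed in this sketch. */
    static inline size_t hash_bits(size_t h, int bits)
    {
        return (h * 11400714819323198485llu) >> (64 - bits);
    }

    /* Usage: with cap_bits = 8 the table has 256 buckets, and
     * hash_bits(key_hash, 8) is always < 256. */
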
h 122 tools/lib/subcmd/parse-options.h #define OPT_ARGUMENT(l, h) { .type = OPTION_ARGUMENT, .long_name = (l), .help = (h) }
h 123 tools/lib/subcmd/parse-options.h #define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
h 124 tools/lib/subcmd/parse-options.h #define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) }
h 125 tools/lib/subcmd/parse-options.h #define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) }
h 126 tools/lib/subcmd/parse-options.h #define OPT_BOOLEAN_FLAG(s, l, v, h, f) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h), .flags = (f) }
h 127 tools/lib/subcmd/parse-options.h #define OPT_BOOLEAN_SET(s, l, v, os, h) \
h 129 tools/lib/subcmd/parse-options.h .value = check_vtype(v, bool *), .help = (h), \
h 131 tools/lib/subcmd/parse-options.h #define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
h 132 tools/lib/subcmd/parse-options.h #define OPT_SET_UINT(s, l, v, h, i) { .type = OPTION_SET_UINT, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .defval = (i) }
h 133 tools/lib/subcmd/parse-options.h #define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
h 134 tools/lib/subcmd/parse-options.h #define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
h 135 tools/lib/subcmd/parse-options.h #define OPT_UINTEGER(s, l, v, h) { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) }
h 136 tools/lib/subcmd/parse-options.h #define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
h 137 tools/lib/subcmd/parse-options.h #define OPT_ULONG(s, l, v, h) { .type = OPTION_ULONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned long *), .help = (h) }
h 138 tools/lib/subcmd/parse-options.h #define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
h 139 tools/lib/subcmd/parse-options.h #define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), .argh = (a), .help = (h) }
h 140 tools/lib/subcmd/parse-options.h #define OPT_STRING_OPTARG(s, l, v, a, h, d) \
h 142 tools/lib/subcmd/parse-options.h .value = check_vtype(v, const char **), .argh =(a), .help = (h), \
h 144 tools/lib/subcmd/parse-options.h #define OPT_STRING_OPTARG_SET(s, l, v, os, a, h, d) \
h 146 tools/lib/subcmd/parse-options.h .value = check_vtype(v, const char **), .argh = (a), .help = (h), \
h 149 tools/lib/subcmd/parse-options.h #define OPT_STRING_NOEMPTY(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), .argh = (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
h 150 tools/lib/subcmd/parse-options.h #define OPT_DATE(s, l, v, h) \
h 151 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
h 152 tools/lib/subcmd/parse-options.h #define OPT_CALLBACK(s, l, v, a, h, f) \
h 153 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f) }
h 154 tools/lib/subcmd/parse-options.h #define OPT_CALLBACK_NOOPT(s, l, v, a, h, f) \
h 155 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
h 156 tools/lib/subcmd/parse-options.h #define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \
h 157 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT }
h 158 tools/lib/subcmd/parse-options.h #define OPT_CALLBACK_DEFAULT_NOOPT(s, l, v, a, h, f, d) \
h 160 tools/lib/subcmd/parse-options.h .value = (v), .arg = (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\
h 162 tools/lib/subcmd/parse-options.h #define OPT_CALLBACK_OPTARG(s, l, v, d, a, h, f) \
h 164 tools/lib/subcmd/parse-options.h .value = (v), .argh = (a), .help = (h), .callback = (f), \
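
Each OPT_* macro above expands to one struct option initializer; a tool declares a table of them and hands it to parse_options(). A hedged sketch of typical usage in the style of perf (the option names and variables here are illustrative):

    #include <subcmd/parse-options.h>

    static int verbose;
    static const char *input_name;

    static const char * const demo_usage[] = {
        "demo [<options>]",
        NULL
    };

    static const struct option demo_options[] = {
        OPT_INCR('v', "verbose", &verbose, "be more verbose"),
        OPT_STRING('i', "input", &input_name, "file", "input file name"),
        OPT_END()
    };

    int main(int argc, const char **argv)
    {
        /* Consumes recognized options, returns the remaining argc. */
        argc = parse_options(argc, argv, demo_options, demo_usage, 0);
        return 0;
    }
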
h 1154 tools/perf/builtin-timechart.c double h = (double)sample->bytes / c->max_bytes;
h 1161 tools/perf/builtin-timechart.c h = 1;
h 1185 tools/perf/builtin-timechart.c h,
h 1194 tools/perf/builtin-timechart.c h,
h 1203 tools/perf/builtin-timechart.c h,
h 1212 tools/perf/builtin-timechart.c h,
h 312 tools/perf/ui/browser.c int height = browser->height, h = 0, pct = 0,
h 323 tools/perf/ui/browser.c while (h < height) {
h 325 tools/perf/ui/browser.c SLsmg_write_char(h == pct ? SLSMG_DIAMOND_CHAR : SLSMG_CKBRD_CHAR);
h 326 tools/perf/ui/browser.c ++h;
h 675 tools/perf/ui/browsers/hists.c struct hist_entry *h = rb_entry(browser->b.top,
h 685 tools/perf/ui/browsers/hists.c h->row_offset, h->nr_rows);
h 1777 tools/perf/ui/browsers/hists.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 1780 tools/perf/ui/browsers/hists.c if (h->filtered) {
h 1782 tools/perf/ui/browsers/hists.c h->unfolded = false;
h 1786 tools/perf/ui/browsers/hists.c percent = hist_entry__get_percent_limit(h);
h 1791 tools/perf/ui/browsers/hists.c row += hist_browser__show_hierarchy_entry(hb, h, row,
h 1792 tools/perf/ui/browsers/hists.c h->depth);
h 1796 tools/perf/ui/browsers/hists.c if (h->has_no_entry) {
h 1797 tools/perf/ui/browsers/hists.c hist_browser__show_no_entry(hb, row, h->depth + 1);
h 1801 tools/perf/ui/browsers/hists.c row += hist_browser__show_entry(hb, h, row);
h 1815 tools/perf/ui/browsers/hists.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 1816 tools/perf/ui/browsers/hists.c float percent = hist_entry__get_percent_limit(h);
h 1818 tools/perf/ui/browsers/hists.c if (!h->filtered && percent >= min_pcnt)
h 1838 tools/perf/ui/browsers/hists.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 1839 tools/perf/ui/browsers/hists.c float percent = hist_entry__get_percent_limit(h);
h 1841 tools/perf/ui/browsers/hists.c if (!h->filtered && percent >= min_pcnt)
h 1853 tools/perf/ui/browsers/hists.c struct hist_entry *h;
h 1886 tools/perf/ui/browsers/hists.c h = rb_entry(browser->top, struct hist_entry, rb_node);
h 1887 tools/perf/ui/browsers/hists.c h->row_offset = 0;
h 1908 tools/perf/ui/browsers/hists.c h = rb_entry(nd, struct hist_entry, rb_node);
h 1909 tools/perf/ui/browsers/hists.c if (h->unfolded && h->leaf) {
h 1910 tools/perf/ui/browsers/hists.c u16 remaining = h->nr_rows - h->row_offset;
h 1913 tools/perf/ui/browsers/hists.c h->row_offset = 0;
h 1915 tools/perf/ui/browsers/hists.c h->row_offset += offset;
h 1930 tools/perf/ui/browsers/hists.c h = rb_entry(nd, struct hist_entry, rb_node);
h 1931 tools/perf/ui/browsers/hists.c if (h->unfolded && h->leaf) {
h 1933 tools/perf/ui/browsers/hists.c if (-offset > h->row_offset) {
h 1934 tools/perf/ui/browsers/hists.c offset += h->row_offset;
h 1935 tools/perf/ui/browsers/hists.c h->row_offset = 0;
h 1937 tools/perf/ui/browsers/hists.c h->row_offset += offset;
h 1943 tools/perf/ui/browsers/hists.c if (-offset > h->nr_rows) {
h 1944 tools/perf/ui/browsers/hists.c offset += h->nr_rows;
h 1945 tools/perf/ui/browsers/hists.c h->row_offset = 0;
h 1947 tools/perf/ui/browsers/hists.c h->row_offset = h->nr_rows + offset;
h 1967 tools/perf/ui/browsers/hists.c h = rb_entry(nd, struct hist_entry, rb_node);
h 1968 tools/perf/ui/browsers/hists.c if (h->unfolded && h->leaf)
h 1969 tools/perf/ui/browsers/hists.c h->row_offset = h->nr_rows;
h 1976 tools/perf/ui/browsers/hists.c h = rb_entry(nd, struct hist_entry, rb_node);
h 1977 tools/perf/ui/browsers/hists.c h->row_offset = 0;
h 2102 tools/perf/ui/browsers/hists.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 2106 tools/perf/ui/browsers/hists.c h, fp,
h 2107 tools/perf/ui/browsers/hists.c h->depth);
h 2109 tools/perf/ui/browsers/hists.c printed += hist_browser__fprintf_entry(browser, h, fp);
h 359 tools/perf/ui/gtk/hists.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 361 tools/perf/ui/gtk/hists.c u64 total = hists__total_period(h->hists);
h 364 tools/perf/ui/gtk/hists.c if (h->filtered)
h 367 tools/perf/ui/gtk/hists.c percent = hist_entry__get_percent_limit(h);
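
The hists-browser hits above recover a struct hist_entry from its embedded rb_node with rb_entry(), which is the container_of() idiom. A minimal illustration (the rb_node here is a stand-in, not the real rbtree type):

    #include <stddef.h>

    struct rb_node { struct rb_node *left, *right; }; /* stand-in */

    struct hist_entry_demo {
        unsigned long period;
        struct rb_node rb_node; /* embedded node, not a pointer */
    };

    /* container_of/rb_entry: subtract the member's offset from the
     * member's address to get back to the enclosing structure. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define rb_entry(ptr, type, member) container_of(ptr, type, member)

    static unsigned long entry_period(struct rb_node *nd)
    {
        struct hist_entry_demo *h =
            rb_entry(nd, struct hist_entry_demo, rb_node);

        return h->period;
    }
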
h 376 tools/perf/ui/gtk/hists.c if (perf_hpp__should_skip(fmt, h->hists))
h 380 tools/perf/ui/gtk/hists.c fmt->color(fmt, &hpp, h);
h 382 tools/perf/ui/gtk/hists.c fmt->entry(fmt, &hpp, h);
h 387 tools/perf/ui/gtk/hists.c if (hist_entry__has_callchains(h) &&
h 391 tools/perf/ui/gtk/hists.c h->stat_acc->period : h->stat.period;
h 393 tools/perf/ui/gtk/hists.c perf_gtk__add_callchain(&h->sorted_chain, store, &iter,
h 831 tools/perf/ui/stdio/hist.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 834 tools/perf/ui/stdio/hist.c if (h->filtered)
h 837 tools/perf/ui/stdio/hist.c percent = hist_entry__get_percent_limit(h);
h 841 tools/perf/ui/stdio/hist.c ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
h 850 tools/perf/ui/stdio/hist.c if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
h 851 tools/perf/ui/stdio/hist.c int depth = hists->nr_hpp_node + h->depth + 1;
h 860 tools/perf/ui/stdio/hist.c if (h->ms.map == NULL && verbose > 1) {
h 861 tools/perf/ui/stdio/hist.c map_groups__fprintf(h->thread->mg, fp);
h 868 tools/perf/util/annotate.c struct sym_hist *h;
h 880 tools/perf/util/annotate.c h = annotated_source__histogram(src, evidx);
h 881 tools/perf/util/annotate.c if (h == NULL) {
h 886 tools/perf/util/annotate.c h->nr_samples++;
h 887 tools/perf/util/annotate.c h->addr[offset].nr_samples++;
h 888 tools/perf/util/annotate.c h->period += sample->period;
h 889 tools/perf/util/annotate.c h->addr[offset].period += sample->period;
h 894 tools/perf/util/annotate.c h->addr[offset].nr_samples, h->addr[offset].period);
h 2243 tools/perf/util/annotate.c struct sym_hist *h = annotation__histogram(notes, evsel->idx);
h 2247 tools/perf/util/annotate.c if (h->addr[offset].nr_samples != 0)
h 2249 tools/perf/util/annotate.c sym->start + offset, h->addr[offset].nr_samples);
h 2250 tools/perf/util/annotate.c printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
h 2275 tools/perf/util/annotate.c struct sym_hist *h = annotation__histogram(notes, evsel->idx);
h 2307 tools/perf/util/annotate.c d_filename, evsel_name, h->nr_samples,
h 2466 tools/perf/util/annotate.c struct sym_hist *h = annotation__histogram(notes, evidx);
h 2468 tools/perf/util/annotate.c memset(h, 0, notes->src->sizeof_sym_hist);
h 2474 tools/perf/util/annotate.c struct sym_hist *h = annotation__histogram(notes, evidx);
h 2477 tools/perf/util/annotate.c h->nr_samples = 0;
h 2479 tools/perf/util/annotate.c h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8;
h 2480 tools/perf/util/annotate.c h->nr_samples += h->addr[offset].nr_samples;
h 1235 tools/perf/util/evsel.c struct perf_evsel_config_term *term, *h;
h 1237 tools/perf/util/evsel.c list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
h 77 tools/perf/util/hist.c void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
h 88 tools/perf/util/hist.c if (h->ms.sym) {
h 89 tools/perf/util/hist.c symlen = h->ms.sym->namelen + 4;
h 99 tools/perf/util/hist.c len = thread__comm_len(h->thread);
h 103 tools/perf/util/hist.c if (h->ms.map) {
h 104 tools/perf/util/hist.c len = dso__name_len(h->ms.map->dso);
h 108 tools/perf/util/hist.c if (h->parent)
h 109 tools/perf/util/hist.c hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
h 111 tools/perf/util/hist.c if (h->branch_info) {
h 112 tools/perf/util/hist.c if (h->branch_info->from.sym) {
h 113 tools/perf/util/hist.c symlen = (int)h->branch_info->from.sym->namelen + 4;
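
The annotate.c hits above accumulate one sym_hist per event, bumping a per-address sample count and period, and periodically decay every bucket to 7/8 so old samples fade out. A simplified sketch of that bookkeeping (fields abbreviated from the real sym_hist):

    #include <stdint.h>

    struct addr_stat { uint64_t nr_samples, period; };

    struct sym_hist_demo {
        uint64_t nr_samples;
        uint64_t period;
        struct addr_stat addr[1024]; /* one slot per instruction offset */
    };

    /* Record one sample at a given offset, as the annotate.c lines do. */
    static void hist_hit(struct sym_hist_demo *h, unsigned offset,
                         uint64_t period)
    {
        h->nr_samples++;
        h->addr[offset].nr_samples++;
        h->period += period;
        h->addr[offset].period += period;
    }

    /* Decay every bucket to 7/8 and recompute the total, mirroring the
     * decay loop shown above. */
    static void hist_decay(struct sym_hist_demo *h, unsigned nr_offsets)
    {
        unsigned offset;

        h->nr_samples = 0;
        for (offset = 0; offset < nr_offsets; offset++) {
            h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8;
            h->nr_samples += h->addr[offset].nr_samples;
        }
    }
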
h 118 tools/perf/util/hist.c symlen = dso__name_len(h->branch_info->from.map->dso);
h 126 tools/perf/util/hist.c if (h->branch_info->to.sym) {
h 127 tools/perf/util/hist.c symlen = (int)h->branch_info->to.sym->namelen + 4;
h 132 tools/perf/util/hist.c symlen = dso__name_len(h->branch_info->to.map->dso);
h 140 tools/perf/util/hist.c if (h->branch_info->srcline_from)
h 142 tools/perf/util/hist.c strlen(h->branch_info->srcline_from));
h 143 tools/perf/util/hist.c if (h->branch_info->srcline_to)
h 145 tools/perf/util/hist.c strlen(h->branch_info->srcline_to));
h 148 tools/perf/util/hist.c if (h->mem_info) {
h 149 tools/perf/util/hist.c if (h->mem_info->daddr.sym) {
h 150 tools/perf/util/hist.c symlen = (int)h->mem_info->daddr.sym->namelen + 4
h 164 tools/perf/util/hist.c if (h->mem_info->iaddr.sym) {
h 165 tools/perf/util/hist.c symlen = (int)h->mem_info->iaddr.sym->namelen + 4
h 175 tools/perf/util/hist.c if (h->mem_info->daddr.map) {
h 176 tools/perf/util/hist.c symlen = dso__name_len(h->mem_info->daddr.map->dso);
h 208 tools/perf/util/hist.c if (h->srcline) {
h 209 tools/perf/util/hist.c len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
h 213 tools/perf/util/hist.c if (h->srcfile)
h 214 tools/perf/util/hist.c hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
h 216 tools/perf/util/hist.c if (h->transaction)
h 220 tools/perf/util/hist.c if (h->trace_output)
h 221 tools/perf/util/hist.c hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
h 1660 tools/perf/util/hist.c static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
h 1663 tools/perf/util/hist.c hists->stats.total_non_filtered_period += h->stat.period;
h 1666 tools/perf/util/hist.c void hists__inc_stats(struct hists *hists, struct hist_entry *h)
h 1668 tools/perf/util/hist.c if (!h->filtered)
h 1669 tools/perf/util/hist.c hists__inc_filter_stats(hists, h);
h 1672 tools/perf/util/hist.c hists->stats.total_period += h->stat.period;
h 1998 tools/perf/util/hist.c static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
h 2001 tools/perf/util/hist.c h->filtered &= ~(1 << filter);
h 2004 tools/perf/util/hist.c struct hist_entry *parent = h->parent_he;
h 2007 tools/perf/util/hist.c he_stat__add_stat(&parent->stat, &h->stat);
h 2024 tools/perf/util/hist.c if (h->filtered)
h 2028 tools/perf/util/hist.c h->unfolded = false;
h 2029 tools/perf/util/hist.c h->has_no_entry = false;
h 2030 tools/perf/util/hist.c h->row_offset = 0;
h 2031 tools/perf/util/hist.c h->nr_rows = 0;
h 2033 tools/perf/util/hist.c hists->stats.nr_non_filtered_samples += h->stat.nr_events;
h 2035 tools/perf/util/hist.c hists__inc_filter_stats(hists, h);
h 2036 tools/perf/util/hist.c hists__calc_col_len(hists, h);
h 2101 tools/perf/util/hist.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 2103 tools/perf/util/hist.c if (filter(hists, h))
h 2106 tools/perf/util/hist.c hists__remove_entry_filter(hists, h, type);
h 2140 tools/perf/util/hist.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 2143 tools/perf/util/hist.c rb_erase_cached(&h->rb_node, &he->hroot_out);
h 2145 tools/perf/util/hist.c resort_filtered_entry(&new_root, h);
h 2163 tools/perf/util/hist.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 2166 tools/perf/util/hist.c ret = hist_entry__filter(h, type, arg);
h 2173 tools/perf/util/hist.c memset(&h->stat, 0, sizeof(h->stat));
h 2174 tools/perf/util/hist.c h->filtered |= (1 << type);
h 2176 tools/perf/util/hist.c nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
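
hists__calc_col_len() sizes output columns by running hists__new_col_len(), a running maximum, over each entry's symbol, DSO, and source strings. A sketch of the pattern (column set abridged):

    #include <string.h>

    enum { COL_SYMBOL, COL_DSO, COL_SRCLINE, COL_MAX };

    static unsigned short col_len[COL_MAX];

    /* Widen a column if this entry needs more room; says whether it grew. */
    static int new_col_len(int col, unsigned short len)
    {
        if (len > col_len[col]) {
            col_len[col] = len;
            return 1;
        }
        return 0;
    }

    /* Per-entry pass, in the spirit of hists__calc_col_len(); the +4
     * leaves room for decoration around symbol names. */
    static void calc_col_len(const char *sym_name, const char *dso_name)
    {
        if (sym_name)
            new_col_len(COL_SYMBOL, strlen(sym_name) + 4);
        if (dso_name)
            new_col_len(COL_DSO, strlen(dso_name));
    }
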
h 2183 tools/perf/util/hist.c h->filtered |= (1 << type);
h 2185 tools/perf/util/hist.c nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
h 2193 tools/perf/util/hist.c hists__remove_entry_filter(hists, h, type);
h 2195 tools/perf/util/hist.c nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
h 2207 tools/perf/util/hist.c struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
h 2210 tools/perf/util/hist.c rb_erase_cached(&h->rb_node, &hists->entries);
h 2212 tools/perf/util/hist.c resort_filtered_entry(&new_root, h);
h 191 tools/perf/util/hist.h void hists__inc_stats(struct hists *hists, struct hist_entry *h);
h 2127 tools/perf/util/machine.c int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
h 2130 tools/perf/util/machine.c if (chash[h] == NO_ENTRY) {
h 2131 tools/perf/util/machine.c chash[h] = i;
h 2132 tools/perf/util/machine.c } else if (l[chash[h]].from == l[i].from) {
h 2136 tools/perf/util/machine.c for (j = chash[h]; j < i && i + off < nr; j++, off++)
h 2786 tools/perf/util/parse-events.c struct parse_events_term *term, *h;
h 2788 tools/perf/util/parse-events.c list_for_each_entry_safe(term, h, terms, list) {
h 1217 tools/perf/util/pmu.c struct parse_events_term *term, *h;
h 1233 tools/perf/util/pmu.c list_for_each_entry_safe(term, h, head_terms, list) {
h 41 tools/perf/util/srccode.c unsigned h = 0;
h 43 tools/perf/util/srccode.c h = 65599 * h + *s++;
h 44 tools/perf/util/srccode.c return h ^ (h >> 16);
h 99 tools/perf/util/srccode.c struct srcfile *h;
h 104 tools/perf/util/srccode.c hlist_for_each_entry (h, &srcfile_htab[hval], hash_nd) {
h 105 tools/perf/util/srccode.c if (!strcmp(fn, h->fn)) {
h 107 tools/perf/util/srccode.c list_del(&h->nd);
h 108 tools/perf/util/srccode.c list_add(&h->nd, &srcfile_list);
h 109 tools/perf/util/srccode.c return h;
h 117 tools/perf/util/srccode.c h = list_entry(srcfile_list.prev, struct srcfile, nd);
h 118 tools/perf/util/srccode.c free_srcfile(h);
h 127 tools/perf/util/srccode.c h = malloc(sizeof(struct srcfile));
h 128 tools/perf/util/srccode.c if (!h)
h 131 tools/perf/util/srccode.c h->fn = strdup(fn);
h 132 tools/perf/util/srccode.c if (!h->fn)
h 135 tools/perf/util/srccode.c h->maplen = st.st_size;
h 136 tools/perf/util/srccode.c sz = (h->maplen + page_size - 1) & ~(page_size - 1);
h 137 tools/perf/util/srccode.c h->map = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
h 139 tools/perf/util/srccode.c if (h->map == (char *)-1) {
h 143 tools/perf/util/srccode.c h->numlines = countlines(h->map, h->maplen);
h 144 tools/perf/util/srccode.c h->lines = calloc(h->numlines, sizeof(char *));
h 145 tools/perf/util/srccode.c if (!h->lines)
h 147 tools/perf/util/srccode.c fill_lines(h->lines, h->numlines, h->map, h->maplen);
h 148 tools/perf/util/srccode.c list_add(&h->nd, &srcfile_list);
h 149 tools/perf/util/srccode.c hlist_add_head(&h->hash_nd, &srcfile_htab[hval]);
h 150 tools/perf/util/srccode.c map_total_sz += h->maplen;
h 152 tools/perf/util/srccode.c return h;
h 155 tools/perf/util/srccode.c munmap(h->map, sz);
h 157 tools/perf/util/srccode.c zfree(&h->fn);
h 159 tools/perf/util/srccode.c free(h);
h 1063 tools/perf/util/stat-display.c int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5);
h 1068 tools/perf/util/stat-display.c for (h = 0; h < n; h++)
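
srccode.c hashes file names with a multiply-by-65599 string hash and folds the top half back in; cached files then live on both a hash chain for lookup and an LRU list for eviction. The hash, rendered standalone (the function name here is ours):

    /* String hash as used by tools/perf/util/srccode.c for its
     * source-file cache: multiply-by-65599 per character, then fold
     * the high bits back into the low ones. */
    static unsigned srcfile_hash(const unsigned char *s)
    {
        unsigned h = 0;

        while (*s)
            h = 65599 * h + *s++;
        return h ^ (h >> 16);
    }
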
h 32 tools/power/x86/intel-speed-select/isst.h #define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))
h 33 tools/power/x86/intel-speed-select/isst.h #define GENMASK_ULL(h, l) \
h 34 tools/power/x86/intel-speed-select/isst.h (((~0ULL) << (l)) & (~0ULL >> (sizeof(long long) * 8 - 1 - (h))))
h 691 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c void print_hwp_request(int cpu, struct msr_hwp_request *h, char *str)
h 700 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
h 701 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7, h->hwp_use_pkg);
h 703 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c void print_hwp_request_pkg(int pkg, struct msr_hwp_request *h, char *str)
h 711 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
h 712 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7);
h 122 tools/testing/selftests/bpf/prog_tests/core_reloc.c .h = { .y = 8 }, \
h 127 tools/testing/selftests/bpf/prog_tests/core_reloc.c .e = 5, .f = 6, .g = 7, .h = 8, \
h 54 tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c struct hlist_head h;
h 90 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c int h;
h 107 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c fn_ptr2_t h;
h 191 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c const int_t *h;
h 465 tools/testing/selftests/bpf/progs/core_reloc_types.h int a, b, c, d, e, f, g, h;
h 490 tools/testing/selftests/bpf/progs/core_reloc_types.h core_reloc_mods_substruct_t h;
h 504 tools/testing/selftests/bpf/progs/core_reloc_types.h } h;
h 524 tools/testing/selftests/bpf/progs/core_reloc_types.h core_reloc_mods_substruct_tt h;
h 16 tools/testing/selftests/bpf/progs/test_core_reloc_mods.c int a, b, c, d, e, f, g, h;
h 41 tools/testing/selftests/bpf/progs/test_core_reloc_mods.c core_reloc_mods_substruct_t h;
h 57 tools/testing/selftests/bpf/progs/test_core_reloc_mods.c BPF_CORE_READ(&out->h, &in->h.y))
h 317 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define __AS_ATHIGH h
h 441 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h lis r4,KERNELBASE@h; \
h 462 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h lis scratch,0x60000000@h; \
h 504 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define tophys(rd, rs) addis rd, rs, -PAGE_OFFSET@h
h 505 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define tovirt(rd, rs) addis rd, rs, PAGE_OFFSET@h
h 829 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h lis reg,BUCSR_INIT@h; \
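
The ppc_asm.h hits above use the assembler's @h operator, which yields the high 16 bits of a constant for lis; the low half is added separately, and @ha rounds up when the sign-extended low half would subtract. A C illustration of the split (the macro names are ours, not the assembler's):

    #include <stdint.h>
    #include <assert.h>

    /* High and low 16-bit halves, as the ppc assembler's @h and @l yield;
     * HA16 mimics @ha, rounding up when the low half will sign-extend. */
    #define HI16(x) (((uint32_t)(x) >> 16) & 0xffff)
    #define LO16(x) ((uint32_t)(x) & 0xffff)
    #define HA16(x) ((HI16(x) + ((LO16(x) & 0x8000) ? 1 : 0)) & 0xffff)

    int main(void)
    {
        uint32_t addr = 0xc0008000; /* a KERNELBASE-like constant */

        /* lis rN,addr@ha ; addi rN,rN,addr@l reconstructs addr,
         * because addi sign-extends its 16-bit operand. */
        int32_t lo = (int16_t)LO16(addr); /* sign-extended, as addi does */
        uint32_t r = (HA16(addr) << 16) + lo;

        assert(r == addr);
        return 0;
    }
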
h 150 tools/testing/selftests/proc/proc-pid-vm.c struct elf64_hdr h;
h 154 tools/testing/selftests/proc/proc-pid-vm.c {&h, sizeof(struct elf64_hdr)},
h 161 tools/testing/selftests/proc/proc-pid-vm.c memset(&h, 0, sizeof(h));
h 162 tools/testing/selftests/proc/proc-pid-vm.c h.e_ident[0] = 0x7f;
h 163 tools/testing/selftests/proc/proc-pid-vm.c h.e_ident[1] = 'E';
h 164 tools/testing/selftests/proc/proc-pid-vm.c h.e_ident[2] = 'L';
h 165 tools/testing/selftests/proc/proc-pid-vm.c h.e_ident[3] = 'F';
h 166 tools/testing/selftests/proc/proc-pid-vm.c h.e_ident[4] = 2;
h 167 tools/testing/selftests/proc/proc-pid-vm.c h.e_ident[5] = 1;
h 168 tools/testing/selftests/proc/proc-pid-vm.c h.e_ident[6] = 1;
h 169 tools/testing/selftests/proc/proc-pid-vm.c h.e_ident[7] = 0;
h 170 tools/testing/selftests/proc/proc-pid-vm.c h.e_type = 2;
h 171 tools/testing/selftests/proc/proc-pid-vm.c h.e_machine = 0x3e;
h 172 tools/testing/selftests/proc/proc-pid-vm.c h.e_version = 1;
h 173 tools/testing/selftests/proc/proc-pid-vm.c h.e_entry = VADDR + sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr);
h 174 tools/testing/selftests/proc/proc-pid-vm.c h.e_phoff = sizeof(struct elf64_hdr);
h 175 tools/testing/selftests/proc/proc-pid-vm.c h.e_shoff = 0;
h 176 tools/testing/selftests/proc/proc-pid-vm.c h.e_flags = 0;
h 177 tools/testing/selftests/proc/proc-pid-vm.c h.e_ehsize = sizeof(struct elf64_hdr);
h 178 tools/testing/selftests/proc/proc-pid-vm.c h.e_phentsize = sizeof(struct elf64_phdr);
h 179 tools/testing/selftests/proc/proc-pid-vm.c h.e_phnum = 1;
h 180 tools/testing/selftests/proc/proc-pid-vm.c h.e_shentsize = 0;
h 181 tools/testing/selftests/proc/proc-pid-vm.c h.e_shnum = 0;
h 182 tools/testing/selftests/proc/proc-pid-vm.c h.e_shstrndx = 0;
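
proc-pid-vm.c fills a minimal ELF64 executable header by hand; the raw values above decode as class 2 = 64-bit, data 1 = little-endian, type 2 = ET_EXEC, machine 0x3e = EM_X86_64. The same header written with <elf.h> constants (a sketch, with the load address as a parameter):

    #include <elf.h>
    #include <string.h>

    /* Fill a minimal x86-64 ET_EXEC header with one program header,
     * following tools/testing/selftests/proc/proc-pid-vm.c; vaddr is
     * whatever load address the caller chooses. */
    static void fill_ehdr(Elf64_Ehdr *h, unsigned long vaddr)
    {
        memset(h, 0, sizeof(*h));
        h->e_ident[EI_MAG0] = ELFMAG0;       /* 0x7f */
        h->e_ident[EI_MAG1] = ELFMAG1;       /* 'E' */
        h->e_ident[EI_MAG2] = ELFMAG2;       /* 'L' */
        h->e_ident[EI_MAG3] = ELFMAG3;       /* 'F' */
        h->e_ident[EI_CLASS] = ELFCLASS64;   /* 2: 64-bit */
        h->e_ident[EI_DATA] = ELFDATA2LSB;   /* 1: little-endian */
        h->e_ident[EI_VERSION] = EV_CURRENT; /* 1 */
        h->e_type = ET_EXEC;                 /* 2 */
        h->e_machine = EM_X86_64;            /* 0x3e */
        h->e_version = EV_CURRENT;
        h->e_entry = vaddr + sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr);
        h->e_phoff = sizeof(Elf64_Ehdr);
        h->e_ehsize = sizeof(Elf64_Ehdr);
        h->e_phentsize = sizeof(Elf64_Phdr);
        h->e_phnum = 1;
    }
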
h 83 tools/testing/selftests/vDSO/parse_vdso.c unsigned long h = 0, g;
h 86 tools/testing/selftests/vDSO/parse_vdso.c h = (h << 4) + *name++;
h 87 tools/testing/selftests/vDSO/parse_vdso.c if (g = h & 0xf0000000)
h 88 tools/testing/selftests/vDSO/parse_vdso.c h ^= g >> 24;
h 89 tools/testing/selftests/vDSO/parse_vdso.c h &= ~g;
h 91 tools/testing/selftests/vDSO/parse_vdso.c return h;
h 278 tools/testing/vsock/vsock_diag_test.c const struct nlmsghdr *h;
h 284 tools/testing/vsock/vsock_diag_test.c if (ret < sizeof(*h)) {
h 289 tools/testing/vsock/vsock_diag_test.c h = (struct nlmsghdr *)buf;
h 291 tools/testing/vsock/vsock_diag_test.c while (NLMSG_OK(h, ret)) {
h 292 tools/testing/vsock/vsock_diag_test.c if (h->nlmsg_type == NLMSG_DONE)
h 295 tools/testing/vsock/vsock_diag_test.c if (h->nlmsg_type == NLMSG_ERROR) {
h 296 tools/testing/vsock/vsock_diag_test.c const struct nlmsgerr *err = NLMSG_DATA(h);
h 298 tools/testing/vsock/vsock_diag_test.c if (h->nlmsg_len < NLMSG_LENGTH(sizeof(*err)))
h 308 tools/testing/vsock/vsock_diag_test.c if (h->nlmsg_type != SOCK_DIAG_BY_FAMILY) {
h 310 tools/testing/vsock/vsock_diag_test.c h->nlmsg_type);
h 313 tools/testing/vsock/vsock_diag_test.c if (h->nlmsg_len <
h 319 tools/testing/vsock/vsock_diag_test.c add_vsock_stat(sockets, NLMSG_DATA(h));
h 321 tools/testing/vsock/vsock_diag_test.c h = NLMSG_NEXT(h, ret);
h 191 tools/usb/usbip/libsrc/names.c unsigned int h = hashnum(vendorid);
h 193 tools/usb/usbip/libsrc/names.c v = vendors[h];
h 202 tools/usb/usbip/libsrc/names.c v->next = vendors[h];
h 203 tools/usb/usbip/libsrc/names.c vendors[h] = v;
h 211 tools/usb/usbip/libsrc/names.c unsigned int h = hashnum((vendorid << 16) | productid);
h 213 tools/usb/usbip/libsrc/names.c p = products[h];
h 223 tools/usb/usbip/libsrc/names.c p->next = products[h];
h 224 tools/usb/usbip/libsrc/names.c products[h] = p;
h 231 tools/usb/usbip/libsrc/names.c unsigned int h = hashnum(classid);
h 233 tools/usb/usbip/libsrc/names.c c = classes[h];
h 242 tools/usb/usbip/libsrc/names.c c->next = classes[h];
h 243 tools/usb/usbip/libsrc/names.c classes[h] = c;
h 250 tools/usb/usbip/libsrc/names.c unsigned int h = hashnum((classid << 8) | subclassid);
h 252 tools/usb/usbip/libsrc/names.c s = subclasses[h];
h 262 tools/usb/usbip/libsrc/names.c s->next = subclasses[h];
h 263 tools/usb/usbip/libsrc/names.c subclasses[h] = s;
h 271 tools/usb/usbip/libsrc/names.c unsigned int h = hashnum((classid << 16) | (subclassid << 8)
h 274 tools/usb/usbip/libsrc/names.c p = protocols[h];
h 286 tools/usb/usbip/libsrc/names.c p->next = protocols[h];
h 287 tools/usb/usbip/libsrc/names.c protocols[h] = p;
h 22 tools/virtio/linux/dma-mapping.h #define dma_free_coherent(d, s, p, h) kfree(p)
h 2024 virt/kvm/arm/vgic/vgic-its.c static u32 compute_next_devid_offset(struct list_head *h,
h 2030 virt/kvm/arm/vgic/vgic-its.c if (list_is_last(&dev->dev_list, h))
h 2038 virt/kvm/arm/vgic/vgic-its.c static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
h 2043 virt/kvm/arm/vgic/vgic-its.c if (list_is_last(&ite->ite_list, h))
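
vsock_diag_test.c above walks its netlink reply with the standard NLMSG_OK()/NLMSG_NEXT() loop, handling NLMSG_DONE and NLMSG_ERROR before trusting any payload. A condensed sketch of that receive loop (validation abbreviated):

    #include <linux/netlink.h>
    #include <stdio.h>

    /* Walk one recv() buffer full of netlink messages, in the style of
     * tools/testing/vsock/vsock_diag_test.c; returns 0 when the dump is
     * done, 1 to keep reading, -1 on error. */
    static int walk_nlmsgs(void *buf, int len)
    {
        const struct nlmsghdr *h = buf;

        while (NLMSG_OK(h, len)) {
            if (h->nlmsg_type == NLMSG_DONE)
                return 0;

            if (h->nlmsg_type == NLMSG_ERROR) {
                const struct nlmsgerr *err = NLMSG_DATA(h);

                if (h->nlmsg_len < NLMSG_LENGTH(sizeof(*err)))
                    fprintf(stderr, "short error message\n");
                else
                    fprintf(stderr, "netlink error %d\n", err->error);
                return -1;
            }

            /* ...check nlmsg_type and nlmsg_len, then consume
             * NLMSG_DATA(h) as the caller's payload... */
            h = NLMSG_NEXT(h, len); /* also decrements len */
        }
        return 1;
    }
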