ct 31 arch/arc/include/asm/string.h extern int strcmp(const char *cs, const char *ct); ct 34 arch/arm/boot/compressed/decompress.c extern int memcmp(const void *cs, const void *ct, size_t count); ct 78 arch/arm/boot/compressed/string.c int memcmp(const void *cs, const void *ct, size_t count) ct 80 arch/arm/boot/compressed/string.c const unsigned char *su1 = cs, *su2 = ct, *end = su1 + count; ct 91 arch/arm/boot/compressed/string.c int strcmp(const char *cs, const char *ct) ct 98 arch/arm/boot/compressed/string.c c2 = *ct++; ct 84 arch/arm/mach-imx/avic.c struct irq_chip_type *ct = gc->chip_types; ct 87 arch/arm/mach-imx/avic.c avic_saved_mask_reg[idx] = imx_readl(avic_base + ct->regs.mask); ct 88 arch/arm/mach-imx/avic.c imx_writel(gc->wake_active, avic_base + ct->regs.mask); ct 106 arch/arm/mach-imx/avic.c struct irq_chip_type *ct = gc->chip_types; ct 109 arch/arm/mach-imx/avic.c imx_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask); ct 127 arch/arm/mach-imx/avic.c struct irq_chip_type *ct; ct 134 arch/arm/mach-imx/avic.c ct = gc->chip_types; ct 135 arch/arm/mach-imx/avic.c ct->chip.irq_mask = irq_gc_mask_clr_bit; ct 136 arch/arm/mach-imx/avic.c ct->chip.irq_unmask = irq_gc_mask_set_bit; ct 137 arch/arm/mach-imx/avic.c ct->chip.irq_ack = irq_gc_mask_clr_bit; ct 138 arch/arm/mach-imx/avic.c ct->chip.irq_set_wake = irq_gc_set_wake; ct 139 arch/arm/mach-imx/avic.c ct->chip.irq_suspend = avic_irq_suspend; ct 140 arch/arm/mach-imx/avic.c ct->chip.irq_resume = avic_irq_resume; ct 141 arch/arm/mach-imx/avic.c ct->regs.mask = !idx ? AVIC_INTENABLEL : AVIC_INTENABLEH; ct 142 arch/arm/mach-imx/avic.c ct->regs.ack = ct->regs.mask; ct 103 arch/arm/mach-imx/tzic.c struct irq_chip_type *ct; ct 110 arch/arm/mach-imx/tzic.c ct = gc->chip_types; ct 111 arch/arm/mach-imx/tzic.c ct->chip.irq_mask = irq_gc_mask_disable_reg; ct 112 arch/arm/mach-imx/tzic.c ct->chip.irq_unmask = irq_gc_unmask_enable_reg; ct 113 arch/arm/mach-imx/tzic.c ct->chip.irq_set_wake = irq_gc_set_wake; ct 114 arch/arm/mach-imx/tzic.c ct->chip.irq_suspend = tzic_irq_suspend; ct 115 arch/arm/mach-imx/tzic.c ct->chip.irq_resume = tzic_irq_resume; ct 116 arch/arm/mach-imx/tzic.c ct->regs.disable = TZIC_ENCLEAR0(idx); ct 117 arch/arm/mach-imx/tzic.c ct->regs.enable = TZIC_ENSET0(idx); ct 89 arch/arm/mach-omap1/irq.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 91 arch/arm/mach-omap1/irq.c ct->chip.irq_mask(d); ct 178 arch/arm/mach-omap1/irq.c struct irq_chip_type *ct; ct 182 arch/arm/mach-omap1/irq.c ct = gc->chip_types; ct 183 arch/arm/mach-omap1/irq.c ct->chip.irq_ack = omap_mask_ack_irq; ct 184 arch/arm/mach-omap1/irq.c ct->chip.irq_mask = irq_gc_mask_set_bit; ct 185 arch/arm/mach-omap1/irq.c ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct 186 arch/arm/mach-omap1/irq.c ct->chip.irq_set_wake = irq_gc_set_wake; ct 187 arch/arm/mach-omap1/irq.c ct->regs.mask = IRQ_MIR_REG_OFFSET; ct 194 arch/arm/mach-omap1/irq.c struct irq_chip_type *ct; ct 273 arch/arm/mach-omap1/irq.c ct = irq_data_get_chip_type(d); ct 274 arch/arm/mach-omap1/irq.c ct->chip.irq_unmask(d); ct 265 arch/arm/mach-omap2/prm_common.c struct irq_chip_type *ct; ct 325 arch/arm/mach-omap2/prm_common.c ct = gc->chip_types; ct 326 arch/arm/mach-omap2/prm_common.c ct->chip.irq_ack = irq_gc_ack_set_bit; ct 327 arch/arm/mach-omap2/prm_common.c ct->chip.irq_mask = irq_gc_mask_clr_bit; ct 328 arch/arm/mach-omap2/prm_common.c ct->chip.irq_unmask = irq_gc_mask_set_bit; ct 330 arch/arm/mach-omap2/prm_common.c ct->regs.ack = irq_setup->ack + i * 4; ct 331 
arch/arm/mach-omap2/prm_common.c ct->regs.mask = irq_setup->mask + i * 4; ct 354 arch/arm/plat-orion/gpio.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 371 arch/arm/plat-orion/gpio.c if (!(ct->type & type)) ct 498 arch/arm/plat-orion/gpio.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 503 arch/arm/plat-orion/gpio.c reg_val = irq_reg_readl(gc, ct->regs.mask); ct 505 arch/arm/plat-orion/gpio.c irq_reg_writel(gc, reg_val, ct->regs.mask); ct 512 arch/arm/plat-orion/gpio.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 517 arch/arm/plat-orion/gpio.c reg_val = irq_reg_readl(gc, ct->regs.mask); ct 519 arch/arm/plat-orion/gpio.c irq_reg_writel(gc, reg_val, ct->regs.mask); ct 531 arch/arm/plat-orion/gpio.c struct irq_chip_type *ct; ct 589 arch/arm/plat-orion/gpio.c ct = gc->chip_types; ct 590 arch/arm/plat-orion/gpio.c ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF; ct 591 arch/arm/plat-orion/gpio.c ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; ct 592 arch/arm/plat-orion/gpio.c ct->chip.irq_mask = orion_gpio_mask_irq; ct 593 arch/arm/plat-orion/gpio.c ct->chip.irq_unmask = orion_gpio_unmask_irq; ct 594 arch/arm/plat-orion/gpio.c ct->chip.irq_set_type = gpio_irq_set_type; ct 595 arch/arm/plat-orion/gpio.c ct->chip.name = ochip->chip.label; ct 597 arch/arm/plat-orion/gpio.c ct++; ct 598 arch/arm/plat-orion/gpio.c ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF; ct 599 arch/arm/plat-orion/gpio.c ct->regs.ack = GPIO_EDGE_CAUSE_OFF; ct 600 arch/arm/plat-orion/gpio.c ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; ct 601 arch/arm/plat-orion/gpio.c ct->chip.irq_ack = irq_gc_ack_clr_bit; ct 602 arch/arm/plat-orion/gpio.c ct->chip.irq_mask = orion_gpio_mask_irq; ct 603 arch/arm/plat-orion/gpio.c ct->chip.irq_unmask = orion_gpio_unmask_irq; ct 604 arch/arm/plat-orion/gpio.c ct->chip.irq_set_type = gpio_irq_set_type; ct 605 arch/arm/plat-orion/gpio.c ct->handler = handle_edge_irq; ct 606 arch/arm/plat-orion/gpio.c ct->chip.name = ochip->chip.label; ct 25 arch/arm/plat-orion/irq.c struct irq_chip_type *ct; ct 34 arch/arm/plat-orion/irq.c ct = gc->chip_types; ct 35 arch/arm/plat-orion/irq.c ct->chip.irq_mask = irq_gc_mask_clr_bit; ct 36 arch/arm/plat-orion/irq.c ct->chip.irq_unmask = irq_gc_mask_set_bit; ct 604 arch/ia64/kernel/palinfo.c pal_bus_features_u_t av, st, ct; ct 609 arch/ia64/kernel/palinfo.c if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) ct 614 arch/ia64/kernel/palinfo.c control = ct.pal_bus_features_val; ct 43 arch/m68k/include/asm/string.h static inline int strcmp(const char *cs, const char *ct) ct 56 arch/m68k/include/asm/string.h : "+a" (cs), "+a" (ct), "=d" (res)); ct 294 arch/mips/alchemy/common/clock.c static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct) ct 301 arch/mips/alchemy/common/clock.c switch (ct) { ct 240 arch/mips/include/asm/xtalk/xwidget.h unsigned ct:1; ct 34 arch/powerpc/include/asm/cputime.h static inline unsigned long cputime_to_usecs(const cputime_t ct) ct 36 arch/powerpc/include/asm/cputime.h return mulhdu((__force u64) ct, __cputime_usec_factor); ct 299 arch/powerpc/include/asm/opal.h int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct); ct 479 arch/powerpc/include/asm/ps3av.h u8 ct:4; ct 229 arch/powerpc/kernel/prom_init.c static int __init prom_strcmp(const char *cs, const char *ct) ct 235 arch/powerpc/kernel/prom_init.c c2 = *ct++; ct 253 arch/powerpc/kernel/prom_init.c static int __init prom_strncmp(const char *cs, const char *ct, size_t count) ct 259 
arch/powerpc/kernel/prom_init.c c2 = *ct++; ct 278 arch/powerpc/kernel/prom_init.c static int __init prom_memcmp(const void *cs, const void *ct, size_t count) ct 283 arch/powerpc/kernel/prom_init.c for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) ct 531 arch/powerpc/perf/hv-24x7.c unsigned ct; ct 580 arch/powerpc/perf/hv-24x7.c it->ct++; ct 582 arch/powerpc/perf/hv-24x7.c name, it->ct); ct 583 arch/powerpc/perf/hv-24x7.c return it->ct; ct 594 arch/powerpc/perf/hv-24x7.c .ct = 0, ct 692 arch/powerpc/perf/hv-24x7.c ssize_t ct, ev_len; ct 873 arch/powerpc/perf/hv-24x7.c ct = event_data_to_attrs(event_idx, events + event_attr_ct, ct 875 arch/powerpc/perf/hv-24x7.c if (ct < 0) { ct 1285 arch/powerpc/perf/hv-24x7.c u64 ct; ct 1337 arch/powerpc/perf/hv-24x7.c if (single_24x7_request(event, &ct)) { ct 1341 arch/powerpc/perf/hv-24x7.c (void)local64_xchg(&event->hw.prev_count, ct); ct 1348 arch/powerpc/perf/hv-24x7.c u64 ct; ct 1350 arch/powerpc/perf/hv-24x7.c if (single_24x7_request(event, &ct)) ct 1354 arch/powerpc/perf/hv-24x7.c return ct; ct 215 arch/powerpc/perf/imc-pmu.c int i = 0, j = 0, ct, ret; ct 229 arch/powerpc/perf/imc-pmu.c ct = of_get_child_count(pmu_events); ct 246 arch/powerpc/perf/imc-pmu.c pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL); ct 250 arch/powerpc/perf/imc-pmu.c ct = 0; ct 253 arch/powerpc/perf/imc-pmu.c ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]); ct 255 arch/powerpc/perf/imc-pmu.c ct++; ct 261 arch/powerpc/perf/imc-pmu.c imc_free_events(pmu->events, ct); ct 273 arch/powerpc/perf/imc-pmu.c attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL); ct 276 arch/powerpc/perf/imc-pmu.c imc_free_events(pmu->events, ct); ct 306 arch/powerpc/perf/imc-pmu.c } while (++i < ct); ct 120 arch/powerpc/platforms/52xx/media5200.c static int media5200_irq_xlate(struct irq_domain *h, struct device_node *ct, ct 215 arch/powerpc/platforms/52xx/mpc52xx_gpt.c static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct, ct 225 arch/powerpc/platforms/52xx/mpc52xx_gpt.c dev_err(gpt->dev, "bad irq specifier in %pOF\n", ct); ct 304 arch/powerpc/platforms/52xx/mpc52xx_pic.c static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct, ct 240 arch/powerpc/platforms/85xx/socrates_fpga_pic.c struct device_node *ct, const u32 *intspec, unsigned int intsize, ct 99 arch/powerpc/platforms/8xx/pic.c static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct, ct 231 arch/powerpc/platforms/cell/interrupt.c static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, ct 239 arch/powerpc/platforms/cell/interrupt.c if (!of_device_is_compatible(ct, ct 244 arch/powerpc/platforms/cell/interrupt.c val = of_get_property(ct, "#interrupt-cells", NULL); ct 170 arch/powerpc/platforms/cell/spider-pic.c static int spider_host_xlate(struct irq_domain *h, struct device_node *ct, ct 217 arch/powerpc/sysdev/ehv_pic.c static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct, ct 166 arch/powerpc/sysdev/ge/ge_pic.c static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct, ct 185 arch/powerpc/sysdev/i8259.c static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct, ct 1085 arch/powerpc/sysdev/mpic.c static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct, ct 364 arch/powerpc/sysdev/tsi108_pci.c static int pci_irq_host_xlate(struct irq_domain *h, struct device_node *ct, ct 359 arch/powerpc/sysdev/xics/xics-common.c static 
int xics_host_xlate(struct irq_domain *h, struct device_node *ct, ct 1271 arch/powerpc/sysdev/xive/common.c static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct, ct 8 arch/s390/boot/string.c int strncmp(const char *cs, const char *ct, size_t count) ct 14 arch/s390/boot/string.c c2 = *ct++; ct 258 arch/s390/include/asm/cio.h __u32 ct : 4; ct 79 arch/s390/include/asm/nmi.h u64 ct : 1; /* 46 cpu timer validity */ ct 132 arch/s390/kernel/cache.c union cache_topology ct; ct 139 arch/s390/kernel/cache.c ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); ct 141 arch/s390/kernel/cache.c ctype = get_cache_type(&ct.ci[0], level); ct 157 arch/s390/kernel/cache.c union cache_topology ct; ct 162 arch/s390/kernel/cache.c ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); ct 167 arch/s390/kernel/cache.c pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0; ct 168 arch/s390/kernel/cache.c ctype = get_cache_type(&ct.ci[0], level); ct 70 arch/sh/boards/mach-se/7343/irq.c struct irq_chip_type *ct; ct 80 arch/sh/boards/mach-se/7343/irq.c ct = gc->chip_types; ct 81 arch/sh/boards/mach-se/7343/irq.c ct->chip.irq_mask = irq_gc_mask_set_bit; ct 82 arch/sh/boards/mach-se/7343/irq.c ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct 84 arch/sh/boards/mach-se/7343/irq.c ct->regs.mask = PA_CPLD_IMSK_REG; ct 69 arch/sh/boards/mach-se/7722/irq.c struct irq_chip_type *ct; ct 79 arch/sh/boards/mach-se/7722/irq.c ct = gc->chip_types; ct 80 arch/sh/boards/mach-se/7722/irq.c ct->chip.irq_mask = irq_gc_mask_set_bit; ct 81 arch/sh/boards/mach-se/7722/irq.c ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct 83 arch/sh/boards/mach-se/7722/irq.c ct->regs.mask = IRQ01_MASK_REG; ct 327 arch/x86/boot/boot.h int strncmp(const char *cs, const char *ct, size_t count); ct 64 arch/x86/boot/string.c int strncmp(const char *cs, const char *ct, size_t count) ct 70 arch/x86/boot/string.c c2 = *ct++; ct 23 arch/x86/boot/string.h extern int strncmp(const char *cs, const char *ct, size_t count); ct 4237 arch/x86/events/intel/core.c EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); ct 22 arch/x86/include/asm/string_32.h extern int strcmp(const char *cs, const char *ct); ct 25 arch/x86/include/asm/string_32.h extern int strncmp(const char *cs, const char *ct, size_t count); ct 218 arch/x86/include/asm/string_32.h extern char *strstr(const char *cs, const char *ct); ct 61 arch/x86/include/asm/string_64.h int memcmp(const void *cs, const void *ct, size_t count); ct 65 arch/x86/include/asm/string_64.h int strcmp(const char *cs, const char *ct); ct 95 arch/x86/lib/string_32.c int strcmp(const char *cs, const char *ct) ct 110 arch/x86/lib/string_32.c : "1" (cs), "2" (ct) ct 118 arch/x86/lib/string_32.c int strncmp(const char *cs, const char *ct, size_t count) ct 135 arch/x86/lib/string_32.c : "1" (cs), "2" (ct), "3" (count) ct 5 arch/x86/lib/strstr_32.c char *strstr(const char *cs, const char *ct) ct 29 arch/x86/lib/strstr_32.c : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) ct 1172 drivers/ata/pata_legacy.c int ct = 0; ct 1228 drivers/ata/pata_legacy.c ct++; ct 1230 drivers/ata/pata_legacy.c if (ct != 0) ct 294 drivers/crypto/mediatek/mtk-aes.c cmd->ct = cpu_to_le32(aes->ctx->ct_dma); ct 870 drivers/crypto/mediatek/mtk-aes.c u32 status = cryp->ring[aes->id]->res_prev->ct; ct 63 drivers/crypto/mediatek/mtk-platform.h __le32 ct; ct 441 drivers/crypto/mediatek/mtk-sha.c cmd->ct = cpu_to_le32(ctx->ct_dma); ct 45 drivers/crypto/nx/nx-842-powernv.c unsigned int ct; ct 686 drivers/crypto/nx/nx-842-powernv.c vas_init_tx_win_attr(&txattr, 
coproc->ct); ct 693 drivers/crypto/nx/nx-842-powernv.c txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr); ct 723 drivers/crypto/nx/nx-842-powernv.c if (coproc->ct != VAS_COP_TYPE_842_HIPRI) ct 747 drivers/crypto/nx/nx-842-powernv.c int vasid, int *ct) ct 798 drivers/crypto/nx/nx-842-powernv.c coproc->ct = VAS_COP_TYPE_842_HIPRI; ct 800 drivers/crypto/nx/nx-842-powernv.c coproc->ct = VAS_COP_TYPE_842; ct 807 drivers/crypto/nx/nx-842-powernv.c vas_init_rx_win_attr(&rxattr, coproc->ct); ct 823 drivers/crypto/nx/nx-842-powernv.c rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr); ct 842 drivers/crypto/nx/nx-842-powernv.c *ct = pid; ct 857 drivers/crypto/nx/nx-842-powernv.c int uninitialized_var(ct); ct 873 drivers/crypto/nx/nx-842-powernv.c ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct); ct 891 drivers/crypto/nx/nx-842-powernv.c ret = opal_nx_coproc_init(chip_id, ct); ct 906 drivers/crypto/nx/nx-842-powernv.c unsigned int ct, ci; ct 915 drivers/crypto/nx/nx-842-powernv.c if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) { ct 929 drivers/crypto/nx/nx-842-powernv.c coproc->ct = ct; ct 933 drivers/crypto/nx/nx-842-powernv.c pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci); ct 936 drivers/crypto/nx/nx-842-powernv.c nx842_ct = ct; ct 937 drivers/crypto/nx/nx-842-powernv.c else if (nx842_ct != ct) ct 939 drivers/crypto/nx/nx-842-powernv.c chip_id, ct, nx842_ct); ct 43 drivers/firmware/efi/libstub/string.c int strncmp(const char *cs, const char *ct, size_t count) ct 49 drivers/firmware/efi/libstub/string.c c2 = *ct++; ct 832 drivers/firmware/stratix10-svc.c struct stratix10_svc_command_config_type *ct = ct 835 drivers/firmware/stratix10-svc.c p_data->flag = ct->flags; ct 392 drivers/gpio/gpio-dwapb.c struct irq_chip_type *ct; ct 422 drivers/gpio/gpio-dwapb.c ct = &irq_gc->chip_types[i]; ct 423 drivers/gpio/gpio-dwapb.c ct->chip.irq_ack = irq_gc_ack_set_bit; ct 424 drivers/gpio/gpio-dwapb.c ct->chip.irq_mask = irq_gc_mask_set_bit; ct 425 drivers/gpio/gpio-dwapb.c ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct 426 drivers/gpio/gpio-dwapb.c ct->chip.irq_set_type = dwapb_irq_set_type; ct 427 drivers/gpio/gpio-dwapb.c ct->chip.irq_enable = dwapb_irq_enable; ct 428 drivers/gpio/gpio-dwapb.c ct->chip.irq_disable = dwapb_irq_disable; ct 429 drivers/gpio/gpio-dwapb.c ct->chip.irq_request_resources = dwapb_irq_reqres; ct 430 drivers/gpio/gpio-dwapb.c ct->chip.irq_release_resources = dwapb_irq_relres; ct 432 drivers/gpio/gpio-dwapb.c ct->chip.irq_set_wake = dwapb_irq_set_wake; ct 434 drivers/gpio/gpio-dwapb.c ct->regs.ack = gpio_reg_convert(gpio, GPIO_PORTA_EOI); ct 435 drivers/gpio/gpio-dwapb.c ct->regs.mask = gpio_reg_convert(gpio, GPIO_INTMASK); ct 436 drivers/gpio/gpio-dwapb.c ct->type = IRQ_TYPE_LEVEL_MASK; ct 379 drivers/gpio/gpio-ml-ioh.c struct irq_chip_type *ct; ct 388 drivers/gpio/gpio-ml-ioh.c ct = gc->chip_types; ct 390 drivers/gpio/gpio-ml-ioh.c ct->chip.irq_mask = ioh_irq_mask; ct 391 drivers/gpio/gpio-ml-ioh.c ct->chip.irq_unmask = ioh_irq_unmask; ct 392 drivers/gpio/gpio-ml-ioh.c ct->chip.irq_set_type = ioh_irq_type; ct 393 drivers/gpio/gpio-ml-ioh.c ct->chip.irq_disable = ioh_irq_disable; ct 394 drivers/gpio/gpio-ml-ioh.c ct->chip.irq_enable = ioh_irq_enable; ct 415 drivers/gpio/gpio-mvebu.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 419 drivers/gpio/gpio-mvebu.c ct->mask_cache_priv &= ~mask; ct 420 drivers/gpio/gpio-mvebu.c mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); ct 428 drivers/gpio/gpio-mvebu.c struct 
irq_chip_type *ct = irq_data_get_chip_type(d); ct 432 drivers/gpio/gpio-mvebu.c ct->mask_cache_priv |= mask; ct 433 drivers/gpio/gpio-mvebu.c mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); ct 441 drivers/gpio/gpio-mvebu.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 445 drivers/gpio/gpio-mvebu.c ct->mask_cache_priv &= ~mask; ct 446 drivers/gpio/gpio-mvebu.c mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv); ct 454 drivers/gpio/gpio-mvebu.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 458 drivers/gpio/gpio-mvebu.c ct->mask_cache_priv |= mask; ct 459 drivers/gpio/gpio-mvebu.c mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv); ct 492 drivers/gpio/gpio-mvebu.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 508 drivers/gpio/gpio-mvebu.c if (!(ct->type & type)) ct 1093 drivers/gpio/gpio-mvebu.c struct irq_chip_type *ct; ct 1226 drivers/gpio/gpio-mvebu.c ct = &gc->chip_types[0]; ct 1227 drivers/gpio/gpio-mvebu.c ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; ct 1228 drivers/gpio/gpio-mvebu.c ct->chip.irq_mask = mvebu_gpio_level_irq_mask; ct 1229 drivers/gpio/gpio-mvebu.c ct->chip.irq_unmask = mvebu_gpio_level_irq_unmask; ct 1230 drivers/gpio/gpio-mvebu.c ct->chip.irq_set_type = mvebu_gpio_irq_set_type; ct 1231 drivers/gpio/gpio-mvebu.c ct->chip.name = mvchip->chip.label; ct 1233 drivers/gpio/gpio-mvebu.c ct = &gc->chip_types[1]; ct 1234 drivers/gpio/gpio-mvebu.c ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; ct 1235 drivers/gpio/gpio-mvebu.c ct->chip.irq_ack = mvebu_gpio_irq_ack; ct 1236 drivers/gpio/gpio-mvebu.c ct->chip.irq_mask = mvebu_gpio_edge_irq_mask; ct 1237 drivers/gpio/gpio-mvebu.c ct->chip.irq_unmask = mvebu_gpio_edge_irq_unmask; ct 1238 drivers/gpio/gpio-mvebu.c ct->chip.irq_set_type = mvebu_gpio_irq_set_type; ct 1239 drivers/gpio/gpio-mvebu.c ct->handler = handle_edge_irq; ct 1240 drivers/gpio/gpio-mvebu.c ct->chip.name = mvchip->chip.label; ct 347 drivers/gpio/gpio-mxc.c struct irq_chip_type *ct; ct 356 drivers/gpio/gpio-mxc.c ct = gc->chip_types; ct 357 drivers/gpio/gpio-mxc.c ct->chip.irq_ack = irq_gc_ack_set_bit; ct 358 drivers/gpio/gpio-mxc.c ct->chip.irq_mask = irq_gc_mask_clr_bit; ct 359 drivers/gpio/gpio-mxc.c ct->chip.irq_unmask = irq_gc_mask_set_bit; ct 360 drivers/gpio/gpio-mxc.c ct->chip.irq_set_type = gpio_set_irq_type; ct 361 drivers/gpio/gpio-mxc.c ct->chip.irq_set_wake = gpio_set_wake_irq; ct 362 drivers/gpio/gpio-mxc.c ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND; ct 363 drivers/gpio/gpio-mxc.c ct->regs.ack = GPIO_ISR; ct 364 drivers/gpio/gpio-mxc.c ct->regs.mask = GPIO_IMR; ct 75 drivers/gpio/gpio-mxs.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 80 drivers/gpio/gpio-mxs.c if (!(ct->type & type)) ct 195 drivers/gpio/gpio-mxs.c struct irq_chip_type *ct; ct 205 drivers/gpio/gpio-mxs.c ct = &gc->chip_types[0]; ct 206 drivers/gpio/gpio-mxs.c ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; ct 207 drivers/gpio/gpio-mxs.c ct->chip.irq_ack = irq_gc_ack_set_bit; ct 208 drivers/gpio/gpio-mxs.c ct->chip.irq_mask = irq_gc_mask_disable_reg; ct 209 drivers/gpio/gpio-mxs.c ct->chip.irq_unmask = irq_gc_unmask_enable_reg; ct 210 drivers/gpio/gpio-mxs.c ct->chip.irq_set_type = mxs_gpio_set_irq_type; ct 211 drivers/gpio/gpio-mxs.c ct->chip.irq_set_wake = mxs_gpio_set_wake_irq; ct 212 drivers/gpio/gpio-mxs.c ct->chip.flags = IRQCHIP_SET_TYPE_MASKED; ct 213 drivers/gpio/gpio-mxs.c ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR; ct 214 drivers/gpio/gpio-mxs.c ct->regs.enable = PINCTRL_PIN2IRQ(port) + 
MXS_SET; ct 215 drivers/gpio/gpio-mxs.c ct->regs.disable = PINCTRL_PIN2IRQ(port) + MXS_CLR; ct 217 drivers/gpio/gpio-mxs.c ct = &gc->chip_types[1]; ct 218 drivers/gpio/gpio-mxs.c ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; ct 219 drivers/gpio/gpio-mxs.c ct->chip.irq_ack = irq_gc_ack_set_bit; ct 220 drivers/gpio/gpio-mxs.c ct->chip.irq_mask = irq_gc_mask_disable_reg; ct 221 drivers/gpio/gpio-mxs.c ct->chip.irq_unmask = irq_gc_unmask_enable_reg; ct 222 drivers/gpio/gpio-mxs.c ct->chip.irq_set_type = mxs_gpio_set_irq_type; ct 223 drivers/gpio/gpio-mxs.c ct->chip.irq_set_wake = mxs_gpio_set_wake_irq; ct 224 drivers/gpio/gpio-mxs.c ct->chip.flags = IRQCHIP_SET_TYPE_MASKED; ct 225 drivers/gpio/gpio-mxs.c ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR; ct 226 drivers/gpio/gpio-mxs.c ct->regs.enable = PINCTRL_IRQEN(port) + MXS_SET; ct 227 drivers/gpio/gpio-mxs.c ct->regs.disable = PINCTRL_IRQEN(port) + MXS_CLR; ct 228 drivers/gpio/gpio-mxs.c ct->handler = handle_level_irq; ct 318 drivers/gpio/gpio-pch.c struct irq_chip_type *ct; ct 327 drivers/gpio/gpio-pch.c ct = gc->chip_types; ct 329 drivers/gpio/gpio-pch.c ct->chip.irq_ack = pch_irq_ack; ct 330 drivers/gpio/gpio-pch.c ct->chip.irq_mask = pch_irq_mask; ct 331 drivers/gpio/gpio-pch.c ct->chip.irq_unmask = pch_irq_unmask; ct 332 drivers/gpio/gpio-pch.c ct->chip.irq_set_type = pch_irq_type; ct 128 drivers/gpio/gpio-sodaville.c struct irq_chip_type *ct; ct 159 drivers/gpio/gpio-sodaville.c ct = sd->gc->chip_types; ct 160 drivers/gpio/gpio-sodaville.c ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; ct 161 drivers/gpio/gpio-sodaville.c ct->regs.eoi = GPSTR; ct 162 drivers/gpio/gpio-sodaville.c ct->regs.mask = GPIO_INT; ct 163 drivers/gpio/gpio-sodaville.c ct->chip.irq_mask = irq_gc_mask_clr_bit; ct 164 drivers/gpio/gpio-sodaville.c ct->chip.irq_unmask = irq_gc_mask_set_bit; ct 165 drivers/gpio/gpio-sodaville.c ct->chip.irq_eoi = irq_gc_eoi; ct 166 drivers/gpio/gpio-sodaville.c ct->chip.irq_set_type = sdv_gpio_pub_set_type; ct 304 drivers/gpio/gpio-sta2x11.c struct irq_chip_type *ct; ct 314 drivers/gpio/gpio-sta2x11.c ct = gc->chip_types; ct 316 drivers/gpio/gpio-sta2x11.c ct->chip.irq_set_type = gsta_irq_type; ct 317 drivers/gpio/gpio-sta2x11.c ct->chip.irq_disable = gsta_irq_disable; ct 318 drivers/gpio/gpio-sta2x11.c ct->chip.irq_enable = gsta_irq_enable; ct 329 drivers/gpio/gpio-sta2x11.c struct irq_chip_type *ct = gc->chip_types; ct 333 drivers/gpio/gpio-sta2x11.c irq_set_chip_and_handler(i, &ct->chip, ct->handler); ct 36 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) ct 620 drivers/gpu/drm/amd/amdkfd/kfd_crat.c int ct = 0; ct 694 drivers/gpu/drm/amd/amdkfd/kfd_crat.c for (ct = 0; ct < num_of_cache_types; ct++) { ct 700 drivers/gpu/drm/amd/amdkfd/kfd_crat.c k += pcache_info[ct].num_cu_shared) { ct 707 drivers/gpu/drm/amd/amdkfd/kfd_crat.c ct, ct 725 drivers/gpu/drm/amd/amdkfd/kfd_crat.c pcache_info[ct].num_cu_shared; ct 438 drivers/gpu/drm/drm_mode_config.c struct drm_crtc *crtc, *ct; ct 478 drivers/gpu/drm/drm_mode_config.c list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { ct 185 drivers/gpu/drm/i915/display/intel_bw.c int ct, bw; ct 193 drivers/gpu/drm/i915/display/intel_bw.c ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + ct 195 drivers/gpu/drm/i915/display/intel_bw.c bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct); ct 64 
drivers/gpu/drm/i915/gt/uc/intel_guc.c intel_guc_ct_init_early(&guc->ct); ct 271 drivers/gpu/drm/i915/gt/uc/intel_guc.c ret = intel_guc_ct_init(&guc->ct); ct 294 drivers/gpu/drm/i915/gt/uc/intel_guc.c intel_guc_ct_fini(&guc->ct); ct 321 drivers/gpu/drm/i915/gt/uc/intel_guc.c intel_guc_ct_fini(&guc->ct); ct 29 drivers/gpu/drm/i915/gt/uc/intel_guc.h struct intel_guc_ct ct; ct 38 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c void intel_guc_ct_init_early(struct intel_guc_ct *ct) ct 41 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c ct->host_channel.owner = CTB_OWNER_HOST; ct 43 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_lock_init(&ct->lock); ct 44 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c INIT_LIST_HEAD(&ct->pending_requests); ct 45 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c INIT_LIST_HEAD(&ct->incoming_requests); ct 46 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c INIT_WORK(&ct->worker, ct_incoming_request_worker_func); ct 49 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) ct 51 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c return container_of(ct, struct intel_guc, ct); ct 443 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c static int ctch_send(struct intel_guc_ct *ct, ct 469 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_lock_irqsave(&ct->lock, flags); ct 470 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c list_add_tail(&request.link, &ct->pending_requests); ct 471 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_unlock_irqrestore(&ct->lock, flags); ct 477 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c intel_guc_notify(ct_to_guc(ct)); ct 504 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_lock_irqsave(&ct->lock, flags); ct 506 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_unlock_irqrestore(&ct->lock, flags); ct 517 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc_ct *ct = &guc->ct; ct 518 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc_ct_channel *ctch = &ct->host_channel; ct 524 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, ct 624 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) ct 656 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_lock(&ct->lock); ct 657 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c list_for_each_entry(req, &ct->pending_requests, link) { ct 675 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_unlock(&ct->lock); ct 682 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c static void ct_process_request(struct intel_guc_ct *ct, ct 685 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc *guc = ct_to_guc(ct); ct 705 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c static bool ct_process_incoming_requests(struct intel_guc_ct *ct) ct 713 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_lock_irqsave(&ct->lock, flags); ct 714 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c request = list_first_entry_or_null(&ct->incoming_requests, ct 718 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c done = !!list_empty(&ct->incoming_requests); ct 719 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_unlock_irqrestore(&ct->lock, flags); ct 726 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c ct_process_request(ct, ct 737 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker); ct 740 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c done = ct_process_incoming_requests(ct); ct 742 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c queue_work(system_unbound_wq, &ct->worker); ct 763 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) ct 780 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_lock_irqsave(&ct->lock, flags); ct 781 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c list_add_tail(&request->link, &ct->incoming_requests); ct 782 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c spin_unlock_irqrestore(&ct->lock, flags); ct 784 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c queue_work(system_unbound_wq, &ct->worker); ct 788 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c static void ct_process_host_channel(struct intel_guc_ct *ct) ct 790 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc_ct_channel *ctch = &ct->host_channel; ct 804 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c err = ct_handle_response(ct, msg); ct 806 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c err = ct_handle_request(ct, msg); ct 821 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc_ct *ct = &guc->ct; ct 823 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c ct_process_host_channel(ct); ct 835 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c int intel_guc_ct_init(struct intel_guc_ct *ct) ct 837 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc *guc = ct_to_guc(ct); ct 838 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc_ct_channel *ctch = &ct->host_channel; ct 859 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c void intel_guc_ct_fini(struct intel_guc_ct *ct) ct 861 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc *guc = ct_to_guc(ct); ct 862 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc_ct_channel *ctch = &ct->host_channel; ct 873 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c int intel_guc_ct_enable(struct intel_guc_ct *ct) ct 875 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc *guc = ct_to_guc(ct); ct 876 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc_ct_channel *ctch = &ct->host_channel; ct 888 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c void intel_guc_ct_disable(struct intel_guc_ct *ct) ct 890 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc *guc = ct_to_guc(ct); ct 891 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c struct intel_guc_ct_channel *ctch = &ct->host_channel; ct 78 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h void intel_guc_ct_init_early(struct intel_guc_ct *ct); ct 79 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h int intel_guc_ct_init(struct intel_guc_ct *ct); ct 80 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h void intel_guc_ct_fini(struct intel_guc_ct *ct); ct 81 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h int intel_guc_ct_enable(struct intel_guc_ct *ct); ct 82 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h void intel_guc_ct_disable(struct intel_guc_ct *ct); ct 84 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h static inline void intel_guc_ct_stop(struct intel_guc_ct *ct) ct 86 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h ct->host_channel.enabled = false; ct 204 drivers/gpu/drm/i915/gt/uc/intel_uc.c ret = intel_guc_ct_enable(&guc->ct); ct 229 drivers/gpu/drm/i915/gt/uc/intel_uc.c intel_guc_ct_stop(&guc->ct); ct 251 drivers/gpu/drm/i915/gt/uc/intel_uc.c intel_guc_ct_disable(&guc->ct); ct 193 drivers/gpu/drm/nouveau/dispnv04/disp.c struct drm_connector *connector, *ct; ct 257 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry_safe(connector, ct, ct 474 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c char ct[8] = "HUB/", en[16] = ""; ct 483 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c snprintf(ct, sizeof(ct), "GPC%d/", info->gpc); ct 526 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c info->client, ct, ec ? 
ec->name : "", ct 873 drivers/gpu/drm/omapdrm/dss/dispc.c const struct csc_coef_yuv2rgb *ct) ct 877 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry)); ct 878 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb)); ct 879 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr)); ct 880 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by)); ct 881 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb)); ct 883 drivers/gpu/drm/omapdrm/dss/dispc.c REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11); ct 889 drivers/gpu/drm/omapdrm/dss/dispc.c const struct csc_coef_rgb2yuv *ct) ct 895 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->yg, ct->yr)); ct 896 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->crr, ct->yb)); ct 897 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->crb, ct->crg)); ct 898 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->cbg, ct->cbr)); ct 899 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->cbb)); ct 901 drivers/gpu/drm/omapdrm/dss/dispc.c REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11); ct 609 drivers/gpu/drm/radeon/radeon_atombios.c uint32_t slot_config, ct; ct 620 drivers/gpu/drm/radeon/radeon_atombios.c ct = (slot_config >> 16) & 0xff; ct 623 drivers/gpu/drm/radeon/radeon_atombios.c [ct]; ct 624 drivers/gpu/drm/radeon/radeon_atombios.c connector_object_id = ct; ct 1264 drivers/gpu/ipu-v3/ipu-common.c struct irq_chip_type *ct; ct 1302 drivers/gpu/ipu-v3/ipu-common.c ct = gc->chip_types; ct 1303 drivers/gpu/ipu-v3/ipu-common.c ct->chip.irq_ack = irq_gc_ack_set_bit; ct 1304 drivers/gpu/ipu-v3/ipu-common.c ct->chip.irq_mask = irq_gc_mask_clr_bit; ct 1305 drivers/gpu/ipu-v3/ipu-common.c ct->chip.irq_unmask = irq_gc_mask_set_bit; ct 1306 drivers/gpu/ipu-v3/ipu-common.c ct->regs.ack = IPU_INT_STAT(i / 32); ct 1307 drivers/gpu/ipu-v3/ipu-common.c ct->regs.mask = IPU_INT_CTRL(i / 32); ct 400 drivers/infiniband/hw/hfi1/init.c u16 ct = ctxt - dd->first_dyn_alloc_ctxt; ct 404 drivers/infiniband/hw/hfi1/init.c if (ct < dd->rcv_entries.nctxt_extra) { ct 405 drivers/infiniband/hw/hfi1/init.c base += ct * (dd->rcv_entries.ngroups + 1); ct 409 drivers/infiniband/hw/hfi1/init.c (ct * dd->rcv_entries.ngroups); ct 43 drivers/irqchip/irq-atmel-aic-common.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 45 drivers/irqchip/irq-atmel-aic-common.c ct->chip.irq_mask(d); ct 89 drivers/irqchip/irq-bcm7120-l2.c struct irq_chip_type *ct = gc->chip_types; ct 94 drivers/irqchip/irq-bcm7120-l2.c ct->regs.mask); ct 100 drivers/irqchip/irq-bcm7120-l2.c struct irq_chip_type *ct = gc->chip_types; ct 104 drivers/irqchip/irq-bcm7120-l2.c irq_reg_writel(gc, gc->mask_cache, ct->regs.mask); ct 223 drivers/irqchip/irq-bcm7120-l2.c struct irq_chip_type *ct; ct 286 drivers/irqchip/irq-bcm7120-l2.c ct = gc->chip_types; ct 289 drivers/irqchip/irq-bcm7120-l2.c ct->regs.mask = data->en_offset[idx]; ct 295 drivers/irqchip/irq-bcm7120-l2.c ct->chip.irq_mask = irq_gc_mask_clr_bit; ct 296 drivers/irqchip/irq-bcm7120-l2.c 
ct->chip.irq_unmask = irq_gc_mask_set_bit; ct 297 drivers/irqchip/irq-bcm7120-l2.c ct->chip.irq_ack = irq_gc_noop; ct 306 drivers/irqchip/irq-bcm7120-l2.c gc->mask_cache = irq_reg_readl(gc, ct->regs.mask); ct 314 drivers/irqchip/irq-bcm7120-l2.c ct->chip.irq_set_wake = irq_gc_set_wake; ct 81 drivers/irqchip/irq-brcmstb-l2.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 85 drivers/irqchip/irq-brcmstb-l2.c irq_reg_writel(gc, mask, ct->regs.disable); ct 86 drivers/irqchip/irq-brcmstb-l2.c *ct->mask_cache &= ~mask; ct 87 drivers/irqchip/irq-brcmstb-l2.c irq_reg_writel(gc, mask, ct->regs.ack); ct 122 drivers/irqchip/irq-brcmstb-l2.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 128 drivers/irqchip/irq-brcmstb-l2.c b->saved_mask = irq_reg_readl(gc, ct->regs.mask); ct 132 drivers/irqchip/irq-brcmstb-l2.c irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable); ct 133 drivers/irqchip/irq-brcmstb-l2.c irq_reg_writel(gc, gc->wake_active, ct->regs.enable); ct 141 drivers/irqchip/irq-brcmstb-l2.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 146 drivers/irqchip/irq-brcmstb-l2.c if (ct->chip.irq_ack) { ct 149 drivers/irqchip/irq-brcmstb-l2.c ct->regs.ack); ct 153 drivers/irqchip/irq-brcmstb-l2.c irq_reg_writel(gc, b->saved_mask, ct->regs.disable); ct 154 drivers/irqchip/irq-brcmstb-l2.c irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable); ct 165 drivers/irqchip/irq-brcmstb-l2.c struct irq_chip_type *ct; ct 229 drivers/irqchip/irq-brcmstb-l2.c ct = data->gc->chip_types; ct 232 drivers/irqchip/irq-brcmstb-l2.c ct->regs.ack = init_params->cpu_clear; ct 233 drivers/irqchip/irq-brcmstb-l2.c ct->chip.irq_ack = irq_gc_ack_set_bit; ct 234 drivers/irqchip/irq-brcmstb-l2.c ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack; ct 237 drivers/irqchip/irq-brcmstb-l2.c ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; ct 240 drivers/irqchip/irq-brcmstb-l2.c ct->chip.irq_mask = irq_gc_mask_disable_reg; ct 241 drivers/irqchip/irq-brcmstb-l2.c ct->regs.disable = init_params->cpu_mask_set; ct 242 drivers/irqchip/irq-brcmstb-l2.c ct->regs.mask = init_params->cpu_mask_status; ct 244 drivers/irqchip/irq-brcmstb-l2.c ct->chip.irq_unmask = irq_gc_unmask_enable_reg; ct 245 drivers/irqchip/irq-brcmstb-l2.c ct->regs.enable = init_params->cpu_mask_clear; ct 247 drivers/irqchip/irq-brcmstb-l2.c ct->chip.irq_suspend = brcmstb_l2_intc_suspend; ct 248 drivers/irqchip/irq-brcmstb-l2.c ct->chip.irq_resume = brcmstb_l2_intc_resume; ct 249 drivers/irqchip/irq-brcmstb-l2.c ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend; ct 256 drivers/irqchip/irq-brcmstb-l2.c ct->chip.irq_set_wake = irq_gc_set_wake; ct 49 drivers/irqchip/irq-csky-apb-intc.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 50 drivers/irqchip/irq-csky-apb-intc.c unsigned long ifr = ct->regs.mask - 8; ct 54 drivers/irqchip/irq-csky-apb-intc.c *ct->mask_cache |= mask; ct 55 drivers/irqchip/irq-csky-apb-intc.c irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); ct 47 drivers/irqchip/irq-davinci-aintc.c struct irq_chip_type *ct; ct 53 drivers/irqchip/irq-davinci-aintc.c ct = gc->chip_types; ct 54 drivers/irqchip/irq-davinci-aintc.c ct->chip.irq_ack = irq_gc_ack_set_bit; ct 55 drivers/irqchip/irq-davinci-aintc.c ct->chip.irq_mask = irq_gc_mask_clr_bit; ct 56 drivers/irqchip/irq-davinci-aintc.c ct->chip.irq_unmask = irq_gc_mask_set_bit; ct 58 drivers/irqchip/irq-davinci-aintc.c ct->regs.ack = DAVINCI_AINTC_IRQ_REG0; ct 59 drivers/irqchip/irq-davinci-aintc.c ct->regs.mask = DAVINCI_AINTC_IRQ_ENT_REG0; ct 57 drivers/irqchip/irq-dw-apb-ictl.c 
struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 60 drivers/irqchip/irq-dw-apb-ictl.c writel_relaxed(~0, gc->reg_base + ct->regs.enable); ct 61 drivers/irqchip/irq-dw-apb-ictl.c writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask); ct 61 drivers/irqchip/irq-goldfish-pic.c struct irq_chip_type *ct; ct 96 drivers/irqchip/irq-goldfish-pic.c ct = gc->chip_types; ct 97 drivers/irqchip/irq-goldfish-pic.c ct->regs.enable = GFPIC_REG_IRQ_ENABLE; ct 98 drivers/irqchip/irq-goldfish-pic.c ct->regs.disable = GFPIC_REG_IRQ_DISABLE; ct 99 drivers/irqchip/irq-goldfish-pic.c ct->chip.irq_unmask = irq_gc_unmask_enable_reg; ct 100 drivers/irqchip/irq-goldfish-pic.c ct->chip.irq_mask = irq_gc_mask_disable_reg; ct 49 drivers/irqchip/irq-ingenic-tcu.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 54 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.ack, mask); ct 55 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.enable, mask); ct 56 drivers/irqchip/irq-ingenic-tcu.c *ct->mask_cache |= mask; ct 63 drivers/irqchip/irq-ingenic-tcu.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 68 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.disable, mask); ct 69 drivers/irqchip/irq-ingenic-tcu.c *ct->mask_cache &= ~mask; ct 76 drivers/irqchip/irq-ingenic-tcu.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 81 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.ack, mask); ct 82 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.disable, mask); ct 90 drivers/irqchip/irq-ingenic-tcu.c struct irq_chip_type *ct; ct 131 drivers/irqchip/irq-ingenic-tcu.c ct = gc->chip_types; ct 136 drivers/irqchip/irq-ingenic-tcu.c ct->regs.disable = TCU_REG_TMSR; ct 137 drivers/irqchip/irq-ingenic-tcu.c ct->regs.enable = TCU_REG_TMCR; ct 138 drivers/irqchip/irq-ingenic-tcu.c ct->regs.ack = TCU_REG_TFCR; ct 139 drivers/irqchip/irq-ingenic-tcu.c ct->chip.irq_unmask = ingenic_tcu_gc_unmask_enable_reg; ct 140 drivers/irqchip/irq-ingenic-tcu.c ct->chip.irq_mask = ingenic_tcu_gc_mask_disable_reg; ct 141 drivers/irqchip/irq-ingenic-tcu.c ct->chip.irq_mask_ack = ingenic_tcu_gc_mask_disable_reg_and_ack; ct 142 drivers/irqchip/irq-ingenic-tcu.c ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; ct 83 drivers/irqchip/irq-ingenic.c struct irq_chip_type *ct; ct 131 drivers/irqchip/irq-ingenic.c ct = gc->chip_types; ct 132 drivers/irqchip/irq-ingenic.c ct->regs.enable = JZ_REG_INTC_CLEAR_MASK; ct 133 drivers/irqchip/irq-ingenic.c ct->regs.disable = JZ_REG_INTC_SET_MASK; ct 134 drivers/irqchip/irq-ingenic.c ct->chip.irq_unmask = irq_gc_unmask_enable_reg; ct 135 drivers/irqchip/irq-ingenic.c ct->chip.irq_mask = irq_gc_mask_disable_reg; ct 136 drivers/irqchip/irq-ingenic.c ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; ct 137 drivers/irqchip/irq-ingenic.c ct->chip.irq_set_wake = irq_gc_set_wake; ct 138 drivers/irqchip/irq-ingenic.c ct->chip.irq_suspend = ingenic_intc_irq_suspend; ct 139 drivers/irqchip/irq-ingenic.c ct->chip.irq_resume = ingenic_intc_irq_resume; ct 107 drivers/irqchip/irq-ls1x.c struct irq_chip_type *ct; ct 159 drivers/irqchip/irq-ls1x.c ct = gc->chip_types; ct 160 drivers/irqchip/irq-ls1x.c ct[0].type = IRQ_TYPE_LEVEL_MASK; ct 161 drivers/irqchip/irq-ls1x.c ct[0].regs.mask = LS_REG_INTC_EN; ct 162 drivers/irqchip/irq-ls1x.c ct[0].regs.ack = LS_REG_INTC_CLR; ct 163 drivers/irqchip/irq-ls1x.c ct[0].chip.irq_unmask = irq_gc_mask_set_bit; ct 164 drivers/irqchip/irq-ls1x.c ct[0].chip.irq_mask = irq_gc_mask_clr_bit; ct 165 
drivers/irqchip/irq-ls1x.c ct[0].chip.irq_ack = irq_gc_ack_set_bit; ct 166 drivers/irqchip/irq-ls1x.c ct[0].chip.irq_set_type = ls_intc_set_type; ct 167 drivers/irqchip/irq-ls1x.c ct[0].handler = handle_level_irq; ct 169 drivers/irqchip/irq-ls1x.c ct[1].type = IRQ_TYPE_EDGE_BOTH; ct 170 drivers/irqchip/irq-ls1x.c ct[1].regs.mask = LS_REG_INTC_EN; ct 171 drivers/irqchip/irq-ls1x.c ct[1].regs.ack = LS_REG_INTC_CLR; ct 172 drivers/irqchip/irq-ls1x.c ct[1].chip.irq_unmask = irq_gc_mask_set_bit; ct 173 drivers/irqchip/irq-ls1x.c ct[1].chip.irq_mask = irq_gc_mask_clr_bit; ct 174 drivers/irqchip/irq-ls1x.c ct[1].chip.irq_ack = irq_gc_ack_set_bit; ct 175 drivers/irqchip/irq-ls1x.c ct[1].chip.irq_set_type = ls_intc_set_type; ct 176 drivers/irqchip/irq-ls1x.c ct[1].handler = handle_edge_irq; ct 27 drivers/irqchip/irq-mscc-ocelot.c struct irq_chip_type *ct = irq_data_get_chip_type(data); ct 37 drivers/irqchip/irq-mscc-ocelot.c *ct->mask_cache &= ~mask; ct 202 drivers/irqchip/irq-omap-intc.c struct irq_chip_type *ct; ct 206 drivers/irqchip/irq-omap-intc.c ct = gc->chip_types; ct 208 drivers/irqchip/irq-omap-intc.c ct->type = IRQ_TYPE_LEVEL_MASK; ct 210 drivers/irqchip/irq-omap-intc.c ct->chip.irq_ack = omap_mask_ack_irq; ct 211 drivers/irqchip/irq-omap-intc.c ct->chip.irq_mask = irq_gc_mask_disable_reg; ct 212 drivers/irqchip/irq-omap-intc.c ct->chip.irq_unmask = irq_gc_unmask_enable_reg; ct 214 drivers/irqchip/irq-omap-intc.c ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE; ct 216 drivers/irqchip/irq-omap-intc.c ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i; ct 217 drivers/irqchip/irq-omap-intc.c ct->regs.disable = INTC_MIR_SET0 + 32 * i; ct 227 drivers/irqchip/irq-omap-intc.c struct irq_chip_type *ct; ct 231 drivers/irqchip/irq-omap-intc.c ct = gc->chip_types; ct 232 drivers/irqchip/irq-omap-intc.c ct->chip.irq_ack = omap_mask_ack_irq; ct 233 drivers/irqchip/irq-omap-intc.c ct->chip.irq_mask = irq_gc_mask_disable_reg; ct 234 drivers/irqchip/irq-omap-intc.c ct->chip.irq_unmask = irq_gc_unmask_enable_reg; ct 235 drivers/irqchip/irq-omap-intc.c ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE; ct 237 drivers/irqchip/irq-omap-intc.c ct->regs.enable = INTC_MIR_CLEAR0; ct 238 drivers/irqchip/irq-omap-intc.c ct->regs.disable = INTC_MIR_SET0; ct 131 drivers/irqchip/irq-orion.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 133 drivers/irqchip/irq-orion.c ct->chip.irq_ack(d); ct 134 drivers/irqchip/irq-orion.c ct->chip.irq_unmask(d); ct 41 drivers/irqchip/irq-sirfsoc.c struct irq_chip_type *ct; ct 51 drivers/irqchip/irq-sirfsoc.c ct = gc->chip_types; ct 52 drivers/irqchip/irq-sirfsoc.c ct->chip.irq_mask = irq_gc_mask_clr_bit; ct 53 drivers/irqchip/irq-sirfsoc.c ct->chip.irq_unmask = irq_gc_mask_set_bit; ct 54 drivers/irqchip/irq-sirfsoc.c ct->regs.mask = SIRFSOC_INT_RISC_MASK0; ct 113 drivers/irqchip/irq-sunxi-nmi.c struct irq_chip_type *ct = gc->chip_types; ct 115 drivers/irqchip/irq-sunxi-nmi.c u32 ctrl_off = ct->regs.type; ct 145 drivers/irqchip/irq-sunxi-nmi.c for (i = 0; i < gc->num_ct; i++, ct++) ct 146 drivers/irqchip/irq-sunxi-nmi.c if (ct->type & flow_type) ct 147 drivers/irqchip/irq-sunxi-nmi.c ctrl_off = ct->regs.type; ct 129 drivers/irqchip/irq-tango.c struct irq_chip_type *ct = gc->chip_types; ct 138 drivers/irqchip/irq-tango.c ct[i].chip.irq_ack = irq_gc_ack_set_bit; ct 139 drivers/irqchip/irq-tango.c ct[i].chip.irq_mask = irq_gc_mask_disable_reg; ct 140 drivers/irqchip/irq-tango.c ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set; ct 141 drivers/irqchip/irq-tango.c ct[i].chip.irq_unmask = 
irq_gc_unmask_enable_reg; ct 142 drivers/irqchip/irq-tango.c ct[i].chip.irq_set_type = tangox_irq_set_type; ct 143 drivers/irqchip/irq-tango.c ct[i].chip.name = gc->domain->name; ct 145 drivers/irqchip/irq-tango.c ct[i].regs.enable = ctl_base + IRQ_EN_SET; ct 146 drivers/irqchip/irq-tango.c ct[i].regs.disable = ctl_base + IRQ_EN_CLR; ct 147 drivers/irqchip/irq-tango.c ct[i].regs.ack = edge_base + EDGE_RAWSTAT; ct 148 drivers/irqchip/irq-tango.c ct[i].regs.type = edge_base; ct 151 drivers/irqchip/irq-tango.c ct[0].type = IRQ_TYPE_LEVEL_MASK; ct 152 drivers/irqchip/irq-tango.c ct[0].handler = handle_level_irq; ct 154 drivers/irqchip/irq-tango.c ct[1].type = IRQ_TYPE_EDGE_BOTH; ct 155 drivers/irqchip/irq-tango.c ct[1].handler = handle_edge_irq; ct 157 drivers/irqchip/irq-tango.c intc_writel(chip, ct->regs.disable, 0xffffffff); ct 158 drivers/irqchip/irq-tango.c intc_writel(chip, ct->regs.ack, 0xffffffff); ct 28 drivers/macintosh/windfarm.h int (*set_value)(struct wf_control *ct, s32 val); ct 29 drivers/macintosh/windfarm.h int (*get_value)(struct wf_control *ct, s32 *val); ct 30 drivers/macintosh/windfarm.h s32 (*get_min)(struct wf_control *ct); ct 31 drivers/macintosh/windfarm.h s32 (*get_max)(struct wf_control *ct); ct 32 drivers/macintosh/windfarm.h void (*release)(struct wf_control *ct); ct 56 drivers/macintosh/windfarm.h extern int wf_register_control(struct wf_control *ct); ct 57 drivers/macintosh/windfarm.h extern void wf_unregister_control(struct wf_control *ct); ct 58 drivers/macintosh/windfarm.h extern int wf_get_control(struct wf_control *ct); ct 59 drivers/macintosh/windfarm.h extern void wf_put_control(struct wf_control *ct); ct 61 drivers/macintosh/windfarm.h static inline int wf_control_set_max(struct wf_control *ct) ct 63 drivers/macintosh/windfarm.h s32 vmax = ct->ops->get_max(ct); ct 64 drivers/macintosh/windfarm.h return ct->ops->set_value(ct, vmax); ct 67 drivers/macintosh/windfarm.h static inline int wf_control_set_min(struct wf_control *ct) ct 69 drivers/macintosh/windfarm.h s32 vmin = ct->ops->get_min(ct); ct 70 drivers/macintosh/windfarm.h return ct->ops->set_value(ct, vmin); ct 73 drivers/macintosh/windfarm.h static inline int wf_control_set(struct wf_control *ct, s32 val) ct 75 drivers/macintosh/windfarm.h return ct->ops->set_value(ct, val); ct 78 drivers/macintosh/windfarm.h static inline int wf_control_get(struct wf_control *ct, s32 *val) ct 80 drivers/macintosh/windfarm.h return ct->ops->get_value(ct, val); ct 83 drivers/macintosh/windfarm.h static inline s32 wf_control_get_min(struct wf_control *ct) ct 85 drivers/macintosh/windfarm.h return ct->ops->get_min(ct); ct 88 drivers/macintosh/windfarm.h static inline s32 wf_control_get_max(struct wf_control *ct) ct 90 drivers/macintosh/windfarm.h return ct->ops->get_max(ct); ct 151 drivers/macintosh/windfarm_core.c struct wf_control *ct = container_of(kref, struct wf_control, ref); ct 153 drivers/macintosh/windfarm_core.c DBG("wf: Deleting control %s\n", ct->name); ct 155 drivers/macintosh/windfarm_core.c if (ct->ops && ct->ops->release) ct 156 drivers/macintosh/windfarm_core.c ct->ops->release(ct); ct 158 drivers/macintosh/windfarm_core.c kfree(ct); ct 211 drivers/macintosh/windfarm_core.c struct wf_control *ct; ct 214 drivers/macintosh/windfarm_core.c list_for_each_entry(ct, &wf_controls, link) { ct 215 drivers/macintosh/windfarm_core.c if (!strcmp(ct->name, new_ct->name)) { ct 217 drivers/macintosh/windfarm_core.c " duplicate control %s\n", ct->name); ct 244 drivers/macintosh/windfarm_core.c void 
wf_unregister_control(struct wf_control *ct) ct 247 drivers/macintosh/windfarm_core.c list_del(&ct->link); ct 250 drivers/macintosh/windfarm_core.c DBG("wf: Unregistered control %s\n", ct->name); ct 252 drivers/macintosh/windfarm_core.c kref_put(&ct->ref, wf_control_release); ct 256 drivers/macintosh/windfarm_core.c int wf_get_control(struct wf_control *ct) ct 258 drivers/macintosh/windfarm_core.c if (!try_module_get(ct->ops->owner)) ct 260 drivers/macintosh/windfarm_core.c kref_get(&ct->ref); ct 265 drivers/macintosh/windfarm_core.c void wf_put_control(struct wf_control *ct) ct 267 drivers/macintosh/windfarm_core.c struct module *mod = ct->ops->owner; ct 268 drivers/macintosh/windfarm_core.c kref_put(&ct->ref, wf_control_release); ct 376 drivers/macintosh/windfarm_core.c struct wf_control *ct; ct 384 drivers/macintosh/windfarm_core.c list_for_each_entry(ct, &wf_controls, link) ct 385 drivers/macintosh/windfarm_core.c wf_notify(WF_EVENT_NEW_CONTROL, ct); ct 24 drivers/macintosh/windfarm_cpufreq_clamp.c static int clamp_set(struct wf_control *ct, s32 value) ct 41 drivers/macintosh/windfarm_cpufreq_clamp.c static int clamp_get(struct wf_control *ct, s32 *value) ct 47 drivers/macintosh/windfarm_cpufreq_clamp.c static s32 clamp_min(struct wf_control *ct) ct 52 drivers/macintosh/windfarm_cpufreq_clamp.c static s32 clamp_max(struct wf_control *ct) ct 78 drivers/macintosh/windfarm_fcu_controls.c static void wf_fcu_fan_release(struct wf_control *ct) ct 80 drivers/macintosh/windfarm_fcu_controls.c struct wf_fcu_fan *fan = ct->priv; ct 144 drivers/macintosh/windfarm_fcu_controls.c static int wf_fcu_fan_set_rpm(struct wf_control *ct, s32 value) ct 146 drivers/macintosh/windfarm_fcu_controls.c struct wf_fcu_fan *fan = ct->priv; ct 166 drivers/macintosh/windfarm_fcu_controls.c static int wf_fcu_fan_get_rpm(struct wf_control *ct, s32 *value) ct 168 drivers/macintosh/windfarm_fcu_controls.c struct wf_fcu_fan *fan = ct->priv; ct 201 drivers/macintosh/windfarm_fcu_controls.c static int wf_fcu_fan_set_pwm(struct wf_control *ct, s32 value) ct 203 drivers/macintosh/windfarm_fcu_controls.c struct wf_fcu_fan *fan = ct->priv; ct 223 drivers/macintosh/windfarm_fcu_controls.c static int wf_fcu_fan_get_pwm(struct wf_control *ct, s32 *value) ct 225 drivers/macintosh/windfarm_fcu_controls.c struct wf_fcu_fan *fan = ct->priv; ct 252 drivers/macintosh/windfarm_fcu_controls.c static s32 wf_fcu_fan_min(struct wf_control *ct) ct 254 drivers/macintosh/windfarm_fcu_controls.c struct wf_fcu_fan *fan = ct->priv; ct 259 drivers/macintosh/windfarm_fcu_controls.c static s32 wf_fcu_fan_max(struct wf_control *ct) ct 261 drivers/macintosh/windfarm_fcu_controls.c struct wf_fcu_fan *fan = ct->priv; ct 268 drivers/macintosh/windfarm_pm112.c struct wf_control *ct; ct 326 drivers/macintosh/windfarm_pm112.c ct = cpu_fans[i]; ct 327 drivers/macintosh/windfarm_pm112.c if (ct == NULL) ct 329 drivers/macintosh/windfarm_pm112.c err = ct->ops->set_value(ct, target * cpu_fan_scale[i] / 100); ct 332 drivers/macintosh/windfarm_pm112.c "error %d\n", ct->name, err); ct 551 drivers/macintosh/windfarm_pm112.c static void pm112_new_control(struct wf_control *ct) ct 555 drivers/macintosh/windfarm_pm112.c if (cpufreq_clamp == NULL && !strcmp(ct->name, "cpufreq-clamp")) { ct 556 drivers/macintosh/windfarm_pm112.c if (wf_get_control(ct) == 0) ct 557 drivers/macintosh/windfarm_pm112.c cpufreq_clamp = ct; ct 561 drivers/macintosh/windfarm_pm112.c if (!strcmp(ct->name, cpu_fan_names[i])) { ct 562 drivers/macintosh/windfarm_pm112.c if (cpu_fans[i] == NULL && 
wf_get_control(ct) == 0) ct 563 drivers/macintosh/windfarm_pm112.c cpu_fans[i] = ct; ct 569 drivers/macintosh/windfarm_pm112.c if (!strcmp(ct->name, "backside-fan")) { ct 570 drivers/macintosh/windfarm_pm112.c if (backside_fan == NULL && wf_get_control(ct) == 0) ct 571 drivers/macintosh/windfarm_pm112.c backside_fan = ct; ct 572 drivers/macintosh/windfarm_pm112.c } else if (!strcmp(ct->name, "slots-fan")) { ct 573 drivers/macintosh/windfarm_pm112.c if (slots_fan == NULL && wf_get_control(ct) == 0) ct 574 drivers/macintosh/windfarm_pm112.c slots_fan = ct; ct 575 drivers/macintosh/windfarm_pm112.c } else if (!strcmp(ct->name, "drive-bay-fan")) { ct 576 drivers/macintosh/windfarm_pm112.c if (drive_bay_fan == NULL && wf_get_control(ct) == 0) ct 577 drivers/macintosh/windfarm_pm112.c drive_bay_fan = ct; ct 589 drivers/macintosh/windfarm_pm112.c if ((ct = cpu_fans[i]) != NULL) ct 591 drivers/macintosh/windfarm_pm112.c ct->ops->get_max(ct) * 100 / max_exhaust; ct 867 drivers/macintosh/windfarm_pm121.c static struct wf_control* pm121_register_control(struct wf_control *ct, ct 871 drivers/macintosh/windfarm_pm121.c if (controls[id] == NULL && !strcmp(ct->name, match)) { ct 872 drivers/macintosh/windfarm_pm121.c if (wf_get_control(ct) == 0) ct 873 drivers/macintosh/windfarm_pm121.c controls[id] = ct; ct 878 drivers/macintosh/windfarm_pm121.c static void pm121_new_control(struct wf_control *ct) ct 885 drivers/macintosh/windfarm_pm121.c all = pm121_register_control(ct, "optical-drive-fan", FAN_OD) && all; ct 886 drivers/macintosh/windfarm_pm121.c all = pm121_register_control(ct, "hard-drive-fan", FAN_HD) && all; ct 887 drivers/macintosh/windfarm_pm121.c all = pm121_register_control(ct, "cpu-fan", FAN_CPU) && all; ct 888 drivers/macintosh/windfarm_pm121.c all = pm121_register_control(ct, "cpufreq-clamp", CPUFREQ) && all; ct 669 drivers/macintosh/windfarm_pm72.c static void pm72_new_control(struct wf_control *ct) ct 674 drivers/macintosh/windfarm_pm72.c if (!strcmp(ct->name, "cpu-front-fan-0")) ct 675 drivers/macintosh/windfarm_pm72.c cpu_front_fans[0] = ct; ct 676 drivers/macintosh/windfarm_pm72.c else if (!strcmp(ct->name, "cpu-front-fan-1")) ct 677 drivers/macintosh/windfarm_pm72.c cpu_front_fans[1] = ct; ct 678 drivers/macintosh/windfarm_pm72.c else if (!strcmp(ct->name, "cpu-rear-fan-0")) ct 679 drivers/macintosh/windfarm_pm72.c cpu_rear_fans[0] = ct; ct 680 drivers/macintosh/windfarm_pm72.c else if (!strcmp(ct->name, "cpu-rear-fan-1")) ct 681 drivers/macintosh/windfarm_pm72.c cpu_rear_fans[1] = ct; ct 682 drivers/macintosh/windfarm_pm72.c else if (!strcmp(ct->name, "cpu-pump-0")) ct 683 drivers/macintosh/windfarm_pm72.c cpu_pumps[0] = ct; ct 684 drivers/macintosh/windfarm_pm72.c else if (!strcmp(ct->name, "cpu-pump-1")) ct 685 drivers/macintosh/windfarm_pm72.c cpu_pumps[1] = ct; ct 686 drivers/macintosh/windfarm_pm72.c else if (!strcmp(ct->name, "backside-fan")) ct 687 drivers/macintosh/windfarm_pm72.c backside_fan = ct; ct 688 drivers/macintosh/windfarm_pm72.c else if (!strcmp(ct->name, "slots-fan")) ct 689 drivers/macintosh/windfarm_pm72.c slots_fan = ct; ct 690 drivers/macintosh/windfarm_pm72.c else if (!strcmp(ct->name, "drive-bay-fan")) ct 691 drivers/macintosh/windfarm_pm72.c drives_fan = ct; ct 692 drivers/macintosh/windfarm_pm72.c else if (!strcmp(ct->name, "cpufreq-clamp")) ct 693 drivers/macintosh/windfarm_pm72.c cpufreq_clamp = ct; ct 611 drivers/macintosh/windfarm_pm81.c static void wf_smu_new_control(struct wf_control *ct) ct 616 drivers/macintosh/windfarm_pm81.c if (fan_cpu_main == 
NULL && !strcmp(ct->name, "cpu-fan")) { ct 617 drivers/macintosh/windfarm_pm81.c if (wf_get_control(ct) == 0) ct 618 drivers/macintosh/windfarm_pm81.c fan_cpu_main = ct; ct 621 drivers/macintosh/windfarm_pm81.c if (fan_system == NULL && !strcmp(ct->name, "system-fan")) { ct 622 drivers/macintosh/windfarm_pm81.c if (wf_get_control(ct) == 0) ct 623 drivers/macintosh/windfarm_pm81.c fan_system = ct; ct 626 drivers/macintosh/windfarm_pm81.c if (cpufreq_clamp == NULL && !strcmp(ct->name, "cpufreq-clamp")) { ct 627 drivers/macintosh/windfarm_pm81.c if (wf_get_control(ct) == 0) ct 628 drivers/macintosh/windfarm_pm81.c cpufreq_clamp = ct; ct 641 drivers/macintosh/windfarm_pm81.c if (fan_hd == NULL && !strcmp(ct->name, "drive-bay-fan")) { ct 642 drivers/macintosh/windfarm_pm81.c if (wf_get_control(ct) == 0) ct 643 drivers/macintosh/windfarm_pm81.c fan_hd = ct; ct 537 drivers/macintosh/windfarm_pm91.c static void wf_smu_new_control(struct wf_control *ct) ct 542 drivers/macintosh/windfarm_pm91.c if (fan_cpu_main == NULL && !strcmp(ct->name, "cpu-rear-fan-0")) { ct 543 drivers/macintosh/windfarm_pm91.c if (wf_get_control(ct) == 0) ct 544 drivers/macintosh/windfarm_pm91.c fan_cpu_main = ct; ct 547 drivers/macintosh/windfarm_pm91.c if (fan_cpu_second == NULL && !strcmp(ct->name, "cpu-rear-fan-1")) { ct 548 drivers/macintosh/windfarm_pm91.c if (wf_get_control(ct) == 0) ct 549 drivers/macintosh/windfarm_pm91.c fan_cpu_second = ct; ct 552 drivers/macintosh/windfarm_pm91.c if (fan_cpu_third == NULL && !strcmp(ct->name, "cpu-front-fan-0")) { ct 553 drivers/macintosh/windfarm_pm91.c if (wf_get_control(ct) == 0) ct 554 drivers/macintosh/windfarm_pm91.c fan_cpu_third = ct; ct 557 drivers/macintosh/windfarm_pm91.c if (cpufreq_clamp == NULL && !strcmp(ct->name, "cpufreq-clamp")) { ct 558 drivers/macintosh/windfarm_pm91.c if (wf_get_control(ct) == 0) ct 559 drivers/macintosh/windfarm_pm91.c cpufreq_clamp = ct; ct 562 drivers/macintosh/windfarm_pm91.c if (fan_hd == NULL && !strcmp(ct->name, "drive-bay-fan")) { ct 563 drivers/macintosh/windfarm_pm91.c if (wf_get_control(ct) == 0) ct 564 drivers/macintosh/windfarm_pm91.c fan_hd = ct; ct 567 drivers/macintosh/windfarm_pm91.c if (fan_slots == NULL && !strcmp(ct->name, "slots-fan")) { ct 568 drivers/macintosh/windfarm_pm91.c if (wf_get_control(ct) == 0) ct 569 drivers/macintosh/windfarm_pm91.c fan_slots = ct; ct 567 drivers/macintosh/windfarm_rm31.c static void rm31_new_control(struct wf_control *ct) ct 571 drivers/macintosh/windfarm_rm31.c if (!strcmp(ct->name, "cpu-fan-a-0")) ct 572 drivers/macintosh/windfarm_rm31.c cpu_fans[0][0] = ct; ct 573 drivers/macintosh/windfarm_rm31.c else if (!strcmp(ct->name, "cpu-fan-b-0")) ct 574 drivers/macintosh/windfarm_rm31.c cpu_fans[0][1] = ct; ct 575 drivers/macintosh/windfarm_rm31.c else if (!strcmp(ct->name, "cpu-fan-c-0")) ct 576 drivers/macintosh/windfarm_rm31.c cpu_fans[0][2] = ct; ct 577 drivers/macintosh/windfarm_rm31.c else if (!strcmp(ct->name, "cpu-fan-a-1")) ct 578 drivers/macintosh/windfarm_rm31.c cpu_fans[1][0] = ct; ct 579 drivers/macintosh/windfarm_rm31.c else if (!strcmp(ct->name, "cpu-fan-b-1")) ct 580 drivers/macintosh/windfarm_rm31.c cpu_fans[1][1] = ct; ct 581 drivers/macintosh/windfarm_rm31.c else if (!strcmp(ct->name, "cpu-fan-c-1")) ct 582 drivers/macintosh/windfarm_rm31.c cpu_fans[1][2] = ct; ct 583 drivers/macintosh/windfarm_rm31.c else if (!strcmp(ct->name, "backside-fan")) ct 584 drivers/macintosh/windfarm_rm31.c backside_fan = ct; ct 585 drivers/macintosh/windfarm_rm31.c else if (!strcmp(ct->name, 
"slots-fan")) ct 586 drivers/macintosh/windfarm_rm31.c slots_fan = ct; ct 587 drivers/macintosh/windfarm_rm31.c else if (!strcmp(ct->name, "cpufreq-clamp")) ct 588 drivers/macintosh/windfarm_rm31.c cpufreq_clamp = ct; ct 108 drivers/macintosh/windfarm_smu_controls.c static void smu_fan_release(struct wf_control *ct) ct 110 drivers/macintosh/windfarm_smu_controls.c struct smu_fan_control *fct = to_smu_fan(ct); ct 115 drivers/macintosh/windfarm_smu_controls.c static int smu_fan_set(struct wf_control *ct, s32 value) ct 117 drivers/macintosh/windfarm_smu_controls.c struct smu_fan_control *fct = to_smu_fan(ct); ct 128 drivers/macintosh/windfarm_smu_controls.c static int smu_fan_get(struct wf_control *ct, s32 *value) ct 130 drivers/macintosh/windfarm_smu_controls.c struct smu_fan_control *fct = to_smu_fan(ct); ct 135 drivers/macintosh/windfarm_smu_controls.c static s32 smu_fan_min(struct wf_control *ct) ct 137 drivers/macintosh/windfarm_smu_controls.c struct smu_fan_control *fct = to_smu_fan(ct); ct 141 drivers/macintosh/windfarm_smu_controls.c static s32 smu_fan_max(struct wf_control *ct) ct 143 drivers/macintosh/windfarm_smu_controls.c struct smu_fan_control *fct = to_smu_fan(ct); ct 1336 drivers/media/platform/s5p-jpeg/jpeg-core.c struct s5p_jpeg_ctx *ct = fh_to_ctx(priv); ct 1338 drivers/media/platform/s5p-jpeg/jpeg-core.c vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type); ct 1343 drivers/media/platform/s5p-jpeg/jpeg-core.c ct->mode == S5P_JPEG_DECODE && !ct->hdr_parsed) ct 1345 drivers/media/platform/s5p-jpeg/jpeg-core.c q_data = get_q_data(ct, f->type); ct 1584 drivers/media/platform/s5p-jpeg/jpeg-core.c static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f) ct 1593 drivers/media/platform/s5p-jpeg/jpeg-core.c vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type); ct 1597 drivers/media/platform/s5p-jpeg/jpeg-core.c q_data = get_q_data(ct, f->type); ct 1601 drivers/media/platform/s5p-jpeg/jpeg-core.c v4l2_err(&ct->jpeg->v4l2_dev, "%s queue busy\n", __func__); ct 1608 drivers/media/platform/s5p-jpeg/jpeg-core.c q_data->fmt = s5p_jpeg_find_format(ct, pix->pixelformat, f_type); ct 1609 drivers/media/platform/s5p-jpeg/jpeg-core.c if (ct->mode == S5P_JPEG_ENCODE || ct 1610 drivers/media/platform/s5p-jpeg/jpeg-core.c (ct->mode == S5P_JPEG_DECODE && ct 1622 drivers/media/platform/s5p-jpeg/jpeg-core.c if (ct->jpeg->variant->hw_ex4_compat && ct 1623 drivers/media/platform/s5p-jpeg/jpeg-core.c f_type == FMT_TYPE_OUTPUT && ct->mode == S5P_JPEG_ENCODE) ct 1624 drivers/media/platform/s5p-jpeg/jpeg-core.c q_data->size = exynos4_jpeg_get_output_buffer_size(ct, ct 1635 drivers/media/platform/s5p-jpeg/jpeg-core.c ctrl_subs = v4l2_ctrl_find(&ct->ctrl_handler, ct 1639 drivers/media/platform/s5p-jpeg/jpeg-core.c ct->crop_altered = false; ct 1648 drivers/media/platform/s5p-jpeg/jpeg-core.c if (!ct->crop_altered && ct 1649 drivers/media/platform/s5p-jpeg/jpeg-core.c ((ct->mode == S5P_JPEG_DECODE && f_type == FMT_TYPE_CAPTURE) || ct 1650 drivers/media/platform/s5p-jpeg/jpeg-core.c (ct->mode == S5P_JPEG_ENCODE && f_type == FMT_TYPE_OUTPUT))) { ct 1651 drivers/media/platform/s5p-jpeg/jpeg-core.c ct->crop_rect.width = pix->width; ct 1652 drivers/media/platform/s5p-jpeg/jpeg-core.c ct->crop_rect.height = pix->height; ct 1660 drivers/media/platform/s5p-jpeg/jpeg-core.c if (ct->mode == S5P_JPEG_DECODE && ct 1662 drivers/media/platform/s5p-jpeg/jpeg-core.c ct->jpeg->variant->hw3250_compat && ct 1664 drivers/media/platform/s5p-jpeg/jpeg-core.c ct->scale_factor > 2) { ct 1665 
drivers/media/platform/s5p-jpeg/jpeg-core.c scale_rect.width = ct->out_q.w / 2; ct 1666 drivers/media/platform/s5p-jpeg/jpeg-core.c scale_rect.height = ct->out_q.h / 2; ct 1667 drivers/media/platform/s5p-jpeg/jpeg-core.c exynos3250_jpeg_try_downscale(ct, &scale_rect); ct 629 drivers/media/rc/img-ir/img-ir-hw.c unsigned int ct; ct 632 drivers/media/rc/img-ir/img-ir-hw.c ct = dec->control.code_type; ct 633 drivers/media/rc/img-ir/img-ir-hw.c if (priv->hw.ct_quirks[ct] & IMG_IR_QUIRK_CODE_BROKEN) ct 957 drivers/media/rc/img-ir/img-ir-hw.c unsigned int ct; ct 963 drivers/media/rc/img-ir/img-ir-hw.c ct = hw->decoder->control.code_type; ct 967 drivers/media/rc/img-ir/img-ir-hw.c if (!(priv->hw.ct_quirks[ct] & IMG_IR_QUIRK_CODE_IRQ) || ct 995 drivers/media/rc/img-ir/img-ir-hw.c if (hw->ct_quirks[ct] & IMG_IR_QUIRK_CODE_LEN_INCR) ct 1149 drivers/media/rc/imon.c ktime_t ct; ct 1156 drivers/media/rc/imon.c ct = ktime_get(); ct 1157 drivers/media/rc/imon.c msec = ktime_ms_delta(ct, prev_time); ct 1158 drivers/media/rc/imon.c msec_hit = ktime_ms_delta(ct, hit_time); ct 1169 drivers/media/rc/imon.c prev_time = ct; ct 1207 drivers/media/rc/imon.c hit_time = ct; ct 168 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c int ct; ct 181 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c ct = hdcs->exp.cto + hdcs->psmp + (HDCS_ADC_START_SIG_DUR + 2); ct 182 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c cp = hdcs->exp.cto + (hdcs->w * ct / 2); ct 195 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c srowexp = hdcs->w - (cycles + hdcs->exp.er + 13) / ct; ct 197 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c mnct = (hdcs->exp.er + 12 + ct - 1) / ct; ct 203 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c mnct = (hdcs->exp.er + 5 + ct - 1) / ct; ct 204 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c max_srowexp = cp - mnct * ct - 1; ct 2065 drivers/media/usb/pvrusb2/pvrusb2-hdw.c const struct pvr2_device_client_table *ct; ct 2073 drivers/media/usb/pvrusb2/pvrusb2-hdw.c ct = &hdw->hdw_desc->client_table; ct 2074 drivers/media/usb/pvrusb2/pvrusb2-hdw.c for (idx = 0; idx < ct->cnt; idx++) { ct 2075 drivers/media/usb/pvrusb2/pvrusb2-hdw.c if (pvr2_hdw_load_subdev(hdw, &ct->lst[idx]) < 0) okFl = 0; ct 117 drivers/mtd/tests/oobtest.c const void *ct, size_t count) ct 124 drivers/mtd/tests/oobtest.c for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) { ct 137 drivers/mtd/tests/oobtest.c #define memcmpshow(addr, cs, ct, count) memcmpshowoffset((addr), 0, (cs), (ct),\ ct 537 drivers/net/ethernet/amd/nmclan_cs.c int ct = 0; ct 543 drivers/net/ethernet/amd/nmclan_cs.c if(++ct > 500) ct 587 drivers/net/ethernet/amd/nmclan_cs.c ct = 0; ct 590 drivers/net/ethernet/amd/nmclan_cs.c if(++ ct > 500) ct 271 drivers/net/ethernet/emulex/benet/be_hw.h u8 ct[2]; /* dword 0 */ ct 319 drivers/net/ethernet/emulex/benet/be_hw.h u8 ct[2]; /* dword 1 */ ct 352 drivers/net/ethernet/emulex/benet/be_hw.h u8 ct[2]; /* dword 1 */ ct 1571 drivers/net/wan/z85230.c int ct; ct 1597 drivers/net/wan/z85230.c ct=c->mtu-get_dma_residue(c->rxdma); ct 1598 drivers/net/wan/z85230.c if(ct<0) ct 1599 drivers/net/wan/z85230.c ct=2; /* Shit happens.. 
*/ ct 1634 drivers/net/wan/z85230.c skb = dev_alloc_skb(ct); ct 1639 drivers/net/wan/z85230.c skb_put(skb, ct); ct 1640 drivers/net/wan/z85230.c skb_copy_to_linear_data(skb, rxb, ct); ct 1642 drivers/net/wan/z85230.c c->netdevice->stats.rx_bytes += ct; ct 1661 drivers/net/wan/z85230.c ct=c->count; ct 1681 drivers/net/wan/z85230.c c->netdevice->stats.rx_bytes += ct; ct 1687 drivers/net/wan/z85230.c skb_trim(skb, ct); ct 658 drivers/net/wireless/ath/wil6210/txrx.c int reverse_memcmp(const void *cs, const void *ct, size_t count) ct 663 drivers/net/wireless/ath/wil6210/txrx.c for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0; ct 1436 drivers/net/wireless/ath/wil6210/wil6210.h int reverse_memcmp(const void *cs, const void *ct, size_t count); ct 608 drivers/ps3/ps3av_cmd.c info->pb1.ct = 0; ct 481 drivers/s390/char/keyboard.c unsigned int ct; ct 524 drivers/s390/char/keyboard.c ct = kbd->accent_table_size; ct 525 drivers/s390/char/keyboard.c if (put_user(ct, &a->kb_cnt)) ct 528 drivers/s390/char/keyboard.c ct * sizeof(struct kbdiacruc))) ct 540 drivers/s390/char/keyboard.c if (get_user(ct, &a->kb_cnt)) ct 542 drivers/s390/char/keyboard.c if (ct >= MAX_DIACR) ct 544 drivers/s390/char/keyboard.c kbd->accent_table_size = ct; ct 545 drivers/s390/char/keyboard.c for (i = 0; i < ct; i++) { ct 560 drivers/s390/char/keyboard.c if (get_user(ct, &a->kb_cnt)) ct 562 drivers/s390/char/keyboard.c if (ct >= MAX_DIACR) ct 564 drivers/s390/char/keyboard.c kbd->accent_table_size = ct; ct 566 drivers/s390/char/keyboard.c ct * sizeof(struct kbdiacruc))) ct 436 drivers/s390/cio/device_ops.c struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct) ct 443 drivers/s390/cio/device_ops.c if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct) ct 131 drivers/s390/net/ctcm_mpc.c __u32 ct, sw, rm, dup; ct 145 drivers/s390/net/ctcm_mpc.c for (ct = 0; ct < len; ct++, ptr++, rptr++) { ct 149 drivers/s390/net/ctcm_mpc.c sprintf(boff, "%4.4X", (__u32)ct); ct 1027 drivers/s390/scsi/zfcp_fc.c struct zfcp_fsf_ct_els *ct = job->dd_data; ct 1038 drivers/s390/scsi/zfcp_fc.c ct->handler = zfcp_fc_ct_job_handler; ct 1039 drivers/s390/scsi/zfcp_fc.c ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->timeout / HZ); ct 958 drivers/s390/scsi/zfcp_fsf.c struct zfcp_fsf_ct_els *ct = req->data; ct 961 drivers/s390/scsi/zfcp_fsf.c ct->status = -EINVAL; ct 968 drivers/s390/scsi/zfcp_fsf.c ct->status = 0; ct 998 drivers/s390/scsi/zfcp_fsf.c if (ct->handler) ct 999 drivers/s390/scsi/zfcp_fsf.c ct->handler(ct->handler_data); ct 1094 drivers/s390/scsi/zfcp_fsf.c struct zfcp_fsf_ct_els *ct, mempool_t *pool, ct 1114 drivers/s390/scsi/zfcp_fsf.c ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout); ct 1120 drivers/s390/scsi/zfcp_fsf.c ct->d_id = wka_port->d_id; ct 1121 drivers/s390/scsi/zfcp_fsf.c req->data = ct; ct 83 drivers/scsi/libfc/fc_elsct.c struct fc_ct_hdr *ct; ct 117 drivers/scsi/libfc/fc_elsct.c ct = fc_frame_payload_get(fp, sizeof(*ct)); ct 118 drivers/scsi/libfc/fc_elsct.c if (ct) { ct 119 drivers/scsi/libfc/fc_elsct.c switch (ntohs(ct->ct_cmd)) { ct 1111 drivers/scsi/libfc/fc_lport.c struct fc_ct_hdr *ct; ct 1134 drivers/scsi/libfc/fc_lport.c ct = fc_frame_payload_get(fp, sizeof(*ct)); ct 1136 drivers/scsi/libfc/fc_lport.c if (fh && ct && fh->fh_type == FC_TYPE_CT && ct 1137 drivers/scsi/libfc/fc_lport.c ct->ct_fs_type == FC_FST_DIR && ct 1138 drivers/scsi/libfc/fc_lport.c ct->ct_fs_subtype == FC_NS_SUBTYPE && ct 1139 drivers/scsi/libfc/fc_lport.c ntohs(ct->ct_cmd) == FC_FS_ACC) ct 1187 
drivers/scsi/libfc/fc_lport.c struct fc_ct_hdr *ct; ct 1210 drivers/scsi/libfc/fc_lport.c ct = fc_frame_payload_get(fp, sizeof(*ct)); ct 1212 drivers/scsi/libfc/fc_lport.c if (fh && ct && fh->fh_type == FC_TYPE_CT && ct 1213 drivers/scsi/libfc/fc_lport.c ct->ct_fs_type == FC_FST_MGMT && ct 1214 drivers/scsi/libfc/fc_lport.c ct->ct_fs_subtype == FC_FDMI_SUBTYPE) { ct 1217 drivers/scsi/libfc/fc_lport.c ct->ct_reason, ct 1218 drivers/scsi/libfc/fc_lport.c ct->ct_explan); ct 1222 drivers/scsi/libfc/fc_lport.c if (ntohs(ct->ct_cmd) == FC_FS_ACC) ct 2002 drivers/scsi/libfc/fc_lport.c struct fc_ct_req *ct; ct 2013 drivers/scsi/libfc/fc_lport.c ct = fc_frame_payload_get(fp, len); ct 2017 drivers/scsi/libfc/fc_lport.c ct, len); ct 9285 drivers/scsi/lpfc/lpfc_sli.c uint8_t ct = 0; ct 9370 drivers/scsi/lpfc/lpfc_sli.c ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); ct 9373 drivers/scsi/lpfc/lpfc_sli.c bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); ct 9661 drivers/scsi/lpfc/lpfc_sli.c ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); ct 9664 drivers/scsi/lpfc/lpfc_sli.c ct, iocbq->iocb.ulpCommand); ct 381 drivers/scsi/mpt3sas/mpt3sas_base.c struct chain_tracker *ct; ct 385 drivers/scsi/mpt3sas/mpt3sas_base.c ct = &ioc->chain_lookup[index].chains_per_smid[j]; ct 386 drivers/scsi/mpt3sas/mpt3sas_base.c if (ct && ct->chain_buffer_dma == chain_buffer_dma) ct 387 drivers/scsi/mpt3sas/mpt3sas_base.c return ct->chain_buffer; ct 4692 drivers/scsi/mpt3sas/mpt3sas_base.c struct chain_tracker *ct; ct 4785 drivers/scsi/mpt3sas/mpt3sas_base.c ct = &ioc->chain_lookup[i].chains_per_smid[j]; ct 4786 drivers/scsi/mpt3sas/mpt3sas_base.c if (ct && ct->chain_buffer) ct 4788 drivers/scsi/mpt3sas/mpt3sas_base.c ct->chain_buffer, ct 4789 drivers/scsi/mpt3sas/mpt3sas_base.c ct->chain_buffer_dma); ct 4841 drivers/scsi/mpt3sas/mpt3sas_base.c struct chain_tracker *ct; ct 5202 drivers/scsi/mpt3sas/mpt3sas_base.c ct = &ioc->chain_lookup[i].chains_per_smid[j]; ct 5203 drivers/scsi/mpt3sas/mpt3sas_base.c ct->chain_buffer = ct 5206 drivers/scsi/mpt3sas/mpt3sas_base.c ct->chain_buffer_dma = ct 5231 drivers/scsi/mpt3sas/mpt3sas_base.c ct = &ioc->chain_lookup[i].chains_per_smid[j]; ct 5232 drivers/scsi/mpt3sas/mpt3sas_base.c ct->chain_buffer = dma_pool_alloc( ct 5234 drivers/scsi/mpt3sas/mpt3sas_base.c &ct->chain_buffer_dma); ct 5235 drivers/scsi/mpt3sas/mpt3sas_base.c if (!ct->chain_buffer) { ct 161 drivers/scsi/sd.c int ct, rcd, wce, sp; ct 184 drivers/scsi/sd.c ct = sysfs_match_string(sd_cache_types, buf); ct 185 drivers/scsi/sd.c if (ct < 0) ct 188 drivers/scsi/sd.c rcd = ct & 0x01 ? 1 : 0; ct 189 drivers/scsi/sd.c wce = (ct & 0x02) && !sdkp->write_prot ? 
1 : 0; ct 291 drivers/scsi/sd.c int ct = sdkp->RCD + 2*sdkp->WCE; ct 293 drivers/scsi/sd.c return sprintf(buf, "%s\n", sd_cache_types[ct]); ct 682 drivers/staging/comedi/drivers/amplc_pci230.c static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct, ct 690 drivers/staging/comedi/drivers/amplc_pci230.c comedi_8254_set_mode(dev->pacer, ct, mode); ct 694 drivers/staging/comedi/drivers/amplc_pci230.c outb(pci230_clk_config(ct, clk_src), dev->iobase + PCI230_ZCLK_SCE); ct 699 drivers/staging/comedi/drivers/amplc_pci230.c comedi_8254_write(dev->pacer, ct, count); ct 702 drivers/staging/comedi/drivers/amplc_pci230.c static void pci230_cancel_ct(struct comedi_device *dev, unsigned int ct) ct 705 drivers/staging/comedi/drivers/amplc_pci230.c comedi_8254_set_mode(dev->pacer, ct, I8254_MODE1); ct 2213 drivers/staging/rtl8723bs/core/rtw_security.c static void rijndaelEncrypt(u32 rk[/*44*/], u8 pt[16], u8 ct[16]) ct 2251 drivers/staging/rtl8723bs/core/rtw_security.c PUTU32(ct, s0); ct 2253 drivers/staging/rtl8723bs/core/rtw_security.c PUTU32(ct + 4, s1); ct 2255 drivers/staging/rtl8723bs/core/rtw_security.c PUTU32(ct + 8, s2); ct 2257 drivers/staging/rtl8723bs/core/rtw_security.c PUTU32(ct + 12, s3); ct 335 drivers/staging/rtl8723bs/include/rtw_security.h #define PUTU32(ct, st) { \ ct 336 drivers/staging/rtl8723bs/include/rtw_security.h (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \ ct 337 drivers/staging/rtl8723bs/include/rtw_security.h (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); } ct 500 drivers/tty/n_gsm.c int ct = 0; ct 502 drivers/tty/n_gsm.c if (ct % 8 == 0) { ct 507 drivers/tty/n_gsm.c ct++; ct 536 drivers/tty/vt/consolemap.c int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) ct 542 drivers/tty/vt/consolemap.c if (!ct) ct 545 drivers/tty/vt/consolemap.c unilist = vmemdup_user(list, ct * sizeof(struct unipair)); ct 626 drivers/tty/vt/consolemap.c for (plist = unilist; ct; ct--, plist++) { ct 738 drivers/tty/vt/consolemap.c int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list) ct 746 drivers/tty/vt/consolemap.c unilist = kvmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL); ct 764 drivers/tty/vt/consolemap.c if (ect < ct) { ct 775 drivers/tty/vt/consolemap.c if (copy_to_user(list, unilist, min(ect, ct) * sizeof(struct unipair))) ct 779 drivers/tty/vt/consolemap.c return ret ? ret : (ect <= ct) ? 
0 : -ENOMEM; ct 1724 drivers/tty/vt/keyboard.c unsigned int ct; ct 1729 drivers/tty/vt/keyboard.c if (get_user(ct, &a->kb_cnt)) ct 1731 drivers/tty/vt/keyboard.c if (ct >= MAX_DIACR) ct 1734 drivers/tty/vt/keyboard.c if (ct) { ct 1737 drivers/tty/vt/keyboard.c sizeof(struct kbdiacr) * ct); ct 1744 drivers/tty/vt/keyboard.c accent_table_size = ct; ct 1745 drivers/tty/vt/keyboard.c for (i = 0; i < ct; i++) { ct 1761 drivers/tty/vt/keyboard.c unsigned int ct; ct 1767 drivers/tty/vt/keyboard.c if (get_user(ct, &a->kb_cnt)) ct 1770 drivers/tty/vt/keyboard.c if (ct >= MAX_DIACR) ct 1773 drivers/tty/vt/keyboard.c if (ct) { ct 1775 drivers/tty/vt/keyboard.c ct * sizeof(struct kbdiacruc)); ct 1780 drivers/tty/vt/keyboard.c if (ct) ct 1782 drivers/tty/vt/keyboard.c ct * sizeof(struct kbdiacruc)); ct 1783 drivers/tty/vt/keyboard.c accent_table_size = ct; ct 638 drivers/usb/storage/sddr09.c int result, bulklen, nsg, ct; ct 646 drivers/usb/storage/sddr09.c address = 040000; ct = 1; ct 648 drivers/usb/storage/sddr09.c bulklen += (ct << 9); ct 649 drivers/usb/storage/sddr09.c command[4*nsg+2] = ct; ct 654 drivers/usb/storage/sddr09.c address = 0340000; ct = 1; ct 656 drivers/usb/storage/sddr09.c bulklen += (ct << 9); ct 657 drivers/usb/storage/sddr09.c command[4*nsg+2] = ct; ct 662 drivers/usb/storage/sddr09.c address = 01000000; ct = 2; ct 664 drivers/usb/storage/sddr09.c bulklen += (ct << 9); ct 665 drivers/usb/storage/sddr09.c command[4*nsg+2] = ct; ct 1372 drivers/usb/storage/sddr09.c int ct = 0; ct 1376 drivers/usb/storage/sddr09.c if (ct >= 1000) ct 1379 drivers/usb/storage/sddr09.c ct++; ct 1382 drivers/usb/storage/sddr09.c lbact += ct; ct 120 drivers/video/fbdev/aty/atyfb.h struct pll_ct ct; ct 583 drivers/video/fbdev/aty/atyfb_base.c par->pll.ct.xres = 0; ct 587 drivers/video/fbdev/aty/atyfb_base.c par->pll.ct.xres = var->xres; ct 1846 drivers/video/fbdev/aty/atyfb_base.c u32 dsp_config = pll->ct.dsp_config; ct 1847 drivers/video/fbdev/aty/atyfb_base.c u32 dsp_on_off = pll->ct.dsp_on_off; ct 1849 drivers/video/fbdev/aty/atyfb_base.c clk.pll_ref_div = pll->ct.pll_ref_div; ct 1850 drivers/video/fbdev/aty/atyfb_base.c clk.mclk_fb_div = pll->ct.mclk_fb_div; ct 1851 drivers/video/fbdev/aty/atyfb_base.c clk.mclk_post_div = pll->ct.mclk_post_div_real; ct 1852 drivers/video/fbdev/aty/atyfb_base.c clk.mclk_fb_mult = pll->ct.mclk_fb_mult; ct 1853 drivers/video/fbdev/aty/atyfb_base.c clk.xclk_post_div = pll->ct.xclk_post_div_real; ct 1854 drivers/video/fbdev/aty/atyfb_base.c clk.vclk_fb_div = pll->ct.vclk_fb_div; ct 1855 drivers/video/fbdev/aty/atyfb_base.c clk.vclk_post_div = pll->ct.vclk_post_div_real; ct 1875 drivers/video/fbdev/aty/atyfb_base.c pll->ct.pll_ref_div = clk.pll_ref_div; ct 1876 drivers/video/fbdev/aty/atyfb_base.c pll->ct.mclk_fb_div = clk.mclk_fb_div; ct 1877 drivers/video/fbdev/aty/atyfb_base.c pll->ct.mclk_post_div_real = clk.mclk_post_div; ct 1878 drivers/video/fbdev/aty/atyfb_base.c pll->ct.mclk_fb_mult = clk.mclk_fb_mult; ct 1879 drivers/video/fbdev/aty/atyfb_base.c pll->ct.xclk_post_div_real = clk.xclk_post_div; ct 1880 drivers/video/fbdev/aty/atyfb_base.c pll->ct.vclk_fb_div = clk.vclk_fb_div; ct 1881 drivers/video/fbdev/aty/atyfb_base.c pll->ct.vclk_post_div_real = clk.vclk_post_div; ct 1882 drivers/video/fbdev/aty/atyfb_base.c pll->ct.dsp_config = (clk.dsp_xclks_per_row & 0x3fff) | ct 1885 drivers/video/fbdev/aty/atyfb_base.c pll->ct.dsp_on_off = (clk.dsp_off & 0x7ff) | ct 256 drivers/video/fbdev/aty/mach64_ct.c if ((err = aty_valid_pll_ct(info, vclk_per, &pll->ct))) ct 258 
drivers/video/fbdev/aty/mach64_ct.c if (M64_HAS(GTB_DSP) && (err = aty_dsp_gt(info, bpp, &pll->ct))) ct 268 drivers/video/fbdev/aty/mach64_ct.c ret = par->ref_clk_per * pll->ct.pll_ref_div * pll->ct.vclk_post_div_real / pll->ct.vclk_fb_div / 2; ct 270 drivers/video/fbdev/aty/mach64_ct.c if(pll->ct.xres > 0) { ct 272 drivers/video/fbdev/aty/mach64_ct.c ret /= pll->ct.xres; ct 292 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_ext_cntl, pll->ct.pll_gen_cntl, pll->ct.pll_vclk_cntl); ct 296 drivers/video/fbdev/aty/mach64_ct.c par->clk_wr_offset, pll->ct.vclk_fb_div, ct 297 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_ref_div, pll->ct.vclk_post_div, pll->ct.vclk_post_div_real); ct 314 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(PLL_VCLK_CNTL, pll->ct.pll_vclk_cntl, par); ct 320 drivers/video/fbdev/aty/mach64_ct.c tmp |= ((pll->ct.vclk_post_div & 0x03U) << tmp2); ct 327 drivers/video/fbdev/aty/mach64_ct.c tmp |= pll->ct.pll_ext_cntl; ct 332 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(tmp, (pll->ct.vclk_fb_div & 0xFFU), par); ct 334 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(PLL_GEN_CNTL, (pll->ct.pll_gen_cntl & (~(PLL_OVERRIDE | PLL_MCLK_RST))) | OSC_EN, par); ct 337 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(PLL_VCLK_CNTL, pll->ct.pll_vclk_cntl & ~(PLL_VCLK_RST), par); ct 340 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(PLL_GEN_CNTL, pll->ct.pll_gen_cntl, par); ct 341 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(PLL_VCLK_CNTL, pll->ct.pll_vclk_cntl, par); ct 359 drivers/video/fbdev/aty/mach64_ct.c aty_st_le32(DSP_CONFIG, pll->ct.dsp_config, par); ct 360 drivers/video/fbdev/aty/mach64_ct.c aty_st_le32(DSP_ON_OFF, pll->ct.dsp_on_off, par); ct 384 drivers/video/fbdev/aty/mach64_ct.c pll->ct.vclk_post_div = (aty_ld_pll_ct(VCLK_POST_DIV, par) >> tmp) & 0x03U; ct 386 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par) & 0x0FU; ct 387 drivers/video/fbdev/aty/mach64_ct.c pll->ct.vclk_fb_div = aty_ld_pll_ct(VCLK0_FB_DIV + clock, par) & 0xFFU; ct 388 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par); ct 389 drivers/video/fbdev/aty/mach64_ct.c pll->ct.mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par); ct 391 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_gen_cntl = aty_ld_pll_ct(PLL_GEN_CNTL, par); ct 392 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_vclk_cntl = aty_ld_pll_ct(PLL_VCLK_CNTL, par); ct 395 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_config = aty_ld_le32(DSP_CONFIG, par); ct 396 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_on_off = aty_ld_le32(DSP_ON_OFF, par); ct 409 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par); ct 410 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclk_post_div = pll->ct.pll_ext_cntl & 0x07; ct 411 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclk_ref_div = 1; ct 412 drivers/video/fbdev/aty/mach64_ct.c switch (pll->ct.xclk_post_div) { ct 417 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclk_ref_div = 3; ct 418 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclk_post_div = 0; ct 422 drivers/video/fbdev/aty/mach64_ct.c printk(KERN_CRIT "atyfb: Unsupported xclk source: %d.\n", pll->ct.xclk_post_div); ct 425 drivers/video/fbdev/aty/mach64_ct.c pll->ct.mclk_fb_mult = 2; ct 426 drivers/video/fbdev/aty/mach64_ct.c if(pll->ct.pll_ext_cntl & PLL_MFB_TIMES_4_2B) { ct 427 drivers/video/fbdev/aty/mach64_ct.c pll->ct.mclk_fb_mult = 4; ct 428 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclk_post_div -= 1; ct 433 
drivers/video/fbdev/aty/mach64_ct.c __func__, pll->ct.mclk_fb_mult, pll->ct.xclk_post_div); ct 439 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkpagefaultdelay = ((memcntl & 0xc00) >> 10) + ((memcntl & 0x1000) >> 12) + trp + 2; ct 440 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkmaxrasdelay = ((memcntl & 0x70000) >> 16) + trp + 2; ct 443 drivers/video/fbdev/aty/mach64_ct.c pll->ct.fifo_size = 32; ct 445 drivers/video/fbdev/aty/mach64_ct.c pll->ct.fifo_size = 24; ct 446 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkpagefaultdelay += 2; ct 447 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkmaxrasdelay += 3; ct 453 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_loop_latency = 10; ct 455 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_loop_latency = 8; ct 456 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkpagefaultdelay += 2; ct 462 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_loop_latency = 9; ct 464 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_loop_latency = 8; ct 465 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkpagefaultdelay += 1; ct 470 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_loop_latency = 11; ct 472 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_loop_latency = 10; ct 473 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkpagefaultdelay += 1; ct 477 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_loop_latency = 8; ct 478 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkpagefaultdelay += 3; ct 481 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_loop_latency = 11; ct 482 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkpagefaultdelay += 3; ct 486 drivers/video/fbdev/aty/mach64_ct.c if (pll->ct.xclkmaxrasdelay <= pll->ct.xclkpagefaultdelay) ct 487 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclkmaxrasdelay = pll->ct.xclkpagefaultdelay + 1; ct 496 drivers/video/fbdev/aty/mach64_ct.c pll->ct.dsp_loop_latency = (dsp_config & DSP_LOOP_LATENCY) >> 16; ct 505 drivers/video/fbdev/aty/mach64_ct.c pll->ct.fifo_size = 32; ct 507 drivers/video/fbdev/aty/mach64_ct.c pll->ct.fifo_size = 24; ct 514 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par); ct 516 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07]; ct 520 drivers/video/fbdev/aty/mach64_ct.c pll->ct.mclk_fb_div = mclk_fb_div; ct 524 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_ref_div = par->pll_per * 2 * 255 / par->ref_clk_per; ct 527 drivers/video/fbdev/aty/mach64_ct.c q = par->ref_clk_per * pll->ct.pll_ref_div * 8 / ct 528 drivers/video/fbdev/aty/mach64_ct.c (pll->ct.mclk_fb_mult * par->xclk_per); ct 538 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclk_post_div_real = aty_postdividers[xpost_div]; ct 539 drivers/video/fbdev/aty/mach64_ct.c pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8; ct 544 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclk_post_div = xpost_div; ct 545 drivers/video/fbdev/aty/mach64_ct.c pll->ct.xclk_ref_div = 1; ct 550 drivers/video/fbdev/aty/mach64_ct.c pllmclk = (1000000 * pll->ct.mclk_fb_mult * pll->ct.mclk_fb_div) / ct 551 drivers/video/fbdev/aty/mach64_ct.c (par->ref_clk_per * pll->ct.pll_ref_div); ct 553 drivers/video/fbdev/aty/mach64_ct.c __func__, pllmclk, pllmclk / pll->ct.xclk_post_div_real); ct 557 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_gen_cntl = OSC_EN; ct 559 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_gen_cntl = OSC_EN | DLL_PWDN /* | FORCE_DCLK_TRI_STATE */; ct 562 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_ext_cntl = 0; ct 564 drivers/video/fbdev/aty/mach64_ct.c 
pll->ct.pll_ext_cntl = xpost_div; ct 566 drivers/video/fbdev/aty/mach64_ct.c if (pll->ct.mclk_fb_mult == 4) ct 567 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_ext_cntl |= PLL_MFB_TIMES_4_2B; ct 570 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_gen_cntl |= (xpost_div << 4); /* mclk == xclk */ ct 576 drivers/video/fbdev/aty/mach64_ct.c pll->ct.pll_gen_cntl |= (6 << 4); /* mclk == sclk */ ct 578 drivers/video/fbdev/aty/mach64_ct.c q = par->ref_clk_per * pll->ct.pll_ref_div * 4 / par->mclk_per; ct 588 drivers/video/fbdev/aty/mach64_ct.c pll->ct.sclk_fb_div = q * sclk_post_div_real / 8; ct 589 drivers/video/fbdev/aty/mach64_ct.c pll->ct.spll_cntl2 = mpost_div << 4; ct 591 drivers/video/fbdev/aty/mach64_ct.c pllsclk = (1000000 * 2 * pll->ct.sclk_fb_div) / ct 592 drivers/video/fbdev/aty/mach64_ct.c (par->ref_clk_per * pll->ct.pll_ref_div); ct 599 drivers/video/fbdev/aty/mach64_ct.c pll->ct.ext_vpll_cntl = aty_ld_pll_ct(EXT_VPLL_CNTL, par); ct 600 drivers/video/fbdev/aty/mach64_ct.c pll->ct.ext_vpll_cntl &= ~(EXT_VPLL_EN | EXT_VPLL_VGA_EN | EXT_VPLL_INSYNC); ct 618 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(SCLK_FB_DIV, pll->ct.sclk_fb_div, par); ct 619 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(SPLL_CNTL2, pll->ct.spll_cntl2, par); ct 627 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(PLL_REF_DIV, pll->ct.pll_ref_div, par); ct 628 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(PLL_GEN_CNTL, pll->ct.pll_gen_cntl, par); ct 629 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(MCLK_FB_DIV, pll->ct.mclk_fb_div, par); ct 630 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(PLL_EXT_CNTL, pll->ct.pll_ext_cntl, par); ct 631 drivers/video/fbdev/aty/mach64_ct.c aty_st_pll_ct(EXT_VPLL_CNTL, pll->ct.ext_vpll_cntl, par); ct 690 drivers/video/fbdev/omap2/omapfb/dss/dispc.c const struct color_conv_coef *ct) ct 694 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry)); ct 695 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb)); ct 696 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr)); ct 697 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by)); ct 698 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb)); ct 700 drivers/video/fbdev/omap2/omapfb/dss/dispc.c REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11); ct 1768 fs/binfmt_elf.c struct core_thread *ct; ct 1809 fs/binfmt_elf.c for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) { ct 1816 fs/binfmt_elf.c t->task = ct->task; ct 1817 fs/binfmt_elf.c if (ct->task == dump_task || !info->thread) { ct 2013 fs/binfmt_elf.c struct core_thread *ct; ct 2019 fs/binfmt_elf.c for (ct = current->mm->core_state->dumper.next; ct 2020 fs/binfmt_elf.c ct; ct = ct->next) { ct 2025 fs/binfmt_elf.c ets->thread = ct->task; ct 1574 fs/binfmt_elf_fdpic.c struct core_thread *ct; ct 1612 fs/binfmt_elf_fdpic.c for (ct = current->mm->core_state->dumper.next; ct 1613 fs/binfmt_elf_fdpic.c ct; ct = ct->next) { ct 1618 fs/binfmt_elf_fdpic.c tmp->thread = ct->task; ct 5013 fs/btrfs/ioctl.c struct timespec64 ct = current_time(inode); ct 5048 fs/btrfs/ioctl.c sa->rtime.sec = ct.tv_sec; ct 5049 fs/btrfs/ioctl.c sa->rtime.nsec = ct.tv_nsec; ct 495 fs/btrfs/root-tree.c struct timespec64 ct; ct 497 fs/btrfs/root-tree.c ktime_get_real_ts64(&ct); 
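Hedged sketch (not part of the listing above): the fs/btrfs/root-tree.c entries just before this point show the common kernel pattern where a local struct timespec64 named ct is filled with the current wall-clock time and its second and nanosecond parts are then copied separately into an on-disk item. The snippet below only illustrates that pattern under stated assumptions; struct example_item and example_stamp_ctime are hypothetical stand-ins, not the actual btrfs code or helpers.

/*
 * Minimal sketch of the timespec64 "ct" pattern seen in the
 * fs/btrfs/root-tree.c references above. The record type and the
 * helper name are invented for illustration only.
 */
#include <linux/types.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>

struct example_item {            /* hypothetical on-disk record */
	u64 ctime_sec;
	u32 ctime_nsec;
};

static void example_stamp_ctime(struct example_item *item)
{
	struct timespec64 ct;

	ktime_get_real_ts64(&ct);    /* current wall-clock time */
	item->ctime_sec  = ct.tv_sec;
	item->ctime_nsec = ct.tv_nsec;
}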
ct 500 fs/btrfs/root-tree.c btrfs_set_stack_timespec_sec(&item->ctime, ct.tv_sec); ct 501 fs/btrfs/root-tree.c btrfs_set_stack_timespec_nsec(&item->ctime, ct.tv_nsec); ct 194 fs/dcache.c static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) ct 200 fs/dcache.c b = load_unaligned_zeropad(ct); ct 206 fs/dcache.c ct += sizeof(unsigned long); ct 217 fs/dcache.c static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) ct 220 fs/dcache.c if (*cs != *ct) ct 223 fs/dcache.c ct++; ct 231 fs/dcache.c static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount) ct 251 fs/dcache.c return dentry_string_cmp(cs, ct, tcount); ct 376 include/linux/mroute_base.h int ct; ct 361 include/linux/netfilter.h int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip, ct 364 include/linux/netfilter.h unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct, ct 459 include/linux/netfilter.h size_t (*build_size)(const struct nf_conn *ct); ct 460 include/linux/netfilter.h int (*build)(struct sk_buff *skb, struct nf_conn *ct, ct 463 include/linux/netfilter.h int (*parse)(const struct nlattr *attr, struct nf_conn *ct); ct 464 include/linux/netfilter.h int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, ct 466 include/linux/netfilter.h void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, ct 34 include/linux/netfilter/nf_conntrack_h323.h int get_h225_addr(struct nf_conn *ct, unsigned char *data, ct 52 include/linux/netfilter/nf_conntrack_h323.h struct nf_conn *ct, ct 57 include/linux/netfilter/nf_conntrack_h323.h struct nf_conn *ct, ct 62 include/linux/netfilter/nf_conntrack_h323.h struct nf_conn *ct, ct 70 include/linux/netfilter/nf_conntrack_h323.h extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct, ct 76 include/linux/netfilter/nf_conntrack_h323.h extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct, ct 83 include/linux/netfilter/nf_conntrack_h323.h struct nf_conn *ct, ct 90 include/linux/netfilter/nf_conntrack_h323.h extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, ct 305 include/linux/netfilter/nf_conntrack_pptp.h struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 312 include/linux/netfilter/nf_conntrack_pptp.h struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 322 include/linux/netfilter/nf_conntrack_pptp.h (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, ct 25 include/linux/netfilter/nf_conntrack_proto_gre.h int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, ct 30 include/linux/netfilter/nf_conntrack_proto_gre.h void nf_ct_gre_keymap_destroy(struct nf_conn *ct); ct 68 include/linux/netfilter/nf_conntrack_sip.h int (*match_len)(const struct nf_conn *ct, ct 169 include/linux/netfilter/nf_conntrack_sip.h int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr, ct 173 include/linux/netfilter/nf_conntrack_sip.h int ct_sip_get_header(const struct nf_conn *ct, const char *dptr, ct 177 include/linux/netfilter/nf_conntrack_sip.h int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, ct 182 include/linux/netfilter/nf_conntrack_sip.h int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, ct 187 include/linux/netfilter/nf_conntrack_sip.h int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, ct 192 include/linux/netfilter/nf_conntrack_sip.h int ct_sip_get_sdp_header(const struct nf_conn *ct, const 
char *dptr, ct 10 include/linux/netfilter/nf_conntrack_snmp.h struct nf_conn *ct, ct 70 include/linux/vt_kern.h int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list); ct 71 include/linux/vt_kern.h int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list); ct 100 include/linux/vt_kern.h int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) ct 105 include/linux/vt_kern.h int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, ct 199 include/net/flow_offload.h } ct; ct 731 include/net/ip_vs.h struct ip_vs_conn *ct); ct 1239 include/net/ip_vs.h int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest); ct 1308 include/net/ip_vs.h struct ip_vs_conn *ct = cp->control; ct 1310 include/net/ip_vs.h if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) && ct 1311 include/net/ip_vs.h (ct->flags & IP_VS_CONN_F_TEMPLATE)) ct 1312 include/net/ip_vs.h ct->state |= IP_VS_CTPL_S_ASSURED; ct 1575 include/net/ip_vs.h struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1577 include/net/ip_vs.h if (ct) { ct 1578 include/net/ip_vs.h nf_conntrack_put(&ct->ct_general); ct 1600 include/net/ip_vs.h void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct, ct 1633 include/net/ip_vs.h struct nf_conn *ct; ct 1637 include/net/ip_vs.h ct = nf_ct_get(skb, &ctinfo); ct 1638 include/net/ip_vs.h if (ct) ct 139 include/net/net_namespace.h struct netns_ct ct; ct 117 include/net/netfilter/nf_conntrack.h static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct) ct 119 include/net/netfilter/nf_conntrack.h return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; ct 122 include/net/netfilter/nf_conntrack.h static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct) ct 124 include/net/netfilter/nf_conntrack.h return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; ct 127 include/net/netfilter/nf_conntrack.h #define nf_ct_tuple(ct, dir) (&(ct)->tuplehash[dir].tuple) ct 134 include/net/netfilter/nf_conntrack.h static inline struct net *nf_ct_net(const struct nf_conn *ct) ct 136 include/net/netfilter/nf_conntrack.h return read_pnet(&ct->ct_net); ct 140 include/net/netfilter/nf_conntrack.h void nf_conntrack_alter_reply(struct nf_conn *ct, ct 159 include/net/netfilter/nf_conntrack.h static inline void nf_ct_put(struct nf_conn *ct) ct 161 include/net/netfilter/nf_conntrack.h WARN_ON(!ct); ct 162 include/net/netfilter/nf_conntrack.h nf_conntrack_put(&ct->ct_general); ct 179 include/net/netfilter/nf_conntrack.h int nf_conntrack_hash_check_insert(struct nf_conn *ct); ct 180 include/net/netfilter/nf_conntrack.h bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report); ct 186 include/net/netfilter/nf_conntrack.h void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 191 include/net/netfilter/nf_conntrack.h static inline void nf_ct_refresh_acct(struct nf_conn *ct, ct 196 include/net/netfilter/nf_conntrack.h __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true); ct 200 include/net/netfilter/nf_conntrack.h static inline void nf_ct_refresh(struct nf_conn *ct, ct 204 include/net/netfilter/nf_conntrack.h __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false); ct 208 include/net/netfilter/nf_conntrack.h bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 212 include/net/netfilter/nf_conntrack.h static inline bool nf_ct_kill(struct nf_conn *ct) ct 214 include/net/netfilter/nf_conntrack.h return nf_ct_delete(ct, 0, 0); ct 231 include/net/netfilter/nf_conntrack.h 
void nf_conntrack_free(struct nf_conn *ct); ct 238 include/net/netfilter/nf_conntrack.h static inline int nf_ct_is_template(const struct nf_conn *ct) ct 240 include/net/netfilter/nf_conntrack.h return test_bit(IPS_TEMPLATE_BIT, &ct->status); ct 244 include/net/netfilter/nf_conntrack.h static inline int nf_ct_is_confirmed(const struct nf_conn *ct) ct 246 include/net/netfilter/nf_conntrack.h return test_bit(IPS_CONFIRMED_BIT, &ct->status); ct 249 include/net/netfilter/nf_conntrack.h static inline int nf_ct_is_dying(const struct nf_conn *ct) ct 251 include/net/netfilter/nf_conntrack.h return test_bit(IPS_DYING_BIT, &ct->status); ct 263 include/net/netfilter/nf_conntrack.h static inline unsigned long nf_ct_expires(const struct nf_conn *ct) ct 265 include/net/netfilter/nf_conntrack.h s32 timeout = ct->timeout - nfct_time_stamp; ct 270 include/net/netfilter/nf_conntrack.h static inline bool nf_ct_is_expired(const struct nf_conn *ct) ct 272 include/net/netfilter/nf_conntrack.h return (__s32)(ct->timeout - nfct_time_stamp) <= 0; ct 276 include/net/netfilter/nf_conntrack.h static inline bool nf_ct_should_gc(const struct nf_conn *ct) ct 278 include/net/netfilter/nf_conntrack.h return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) && ct 279 include/net/netfilter/nf_conntrack.h !nf_ct_is_dying(ct); ct 314 include/net/netfilter/nf_conntrack.h u32 nf_ct_get_id(const struct nf_conn *ct); ct 317 include/net/netfilter/nf_conntrack.h nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) ct 319 include/net/netfilter/nf_conntrack.h skb_set_nfct(skb, (unsigned long)ct | info); ct 322 include/net/netfilter/nf_conntrack.h #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) ct 323 include/net/netfilter/nf_conntrack.h #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) ct 324 include/net/netfilter/nf_conntrack.h #define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v)) ct 24 include/net/netfilter/nf_conntrack_acct.h struct nf_conn_acct *nf_conn_acct_find(const struct nf_conn *ct) ct 26 include/net/netfilter/nf_conntrack_acct.h return nf_ct_ext_find(ct, NF_CT_EXT_ACCT); ct 30 include/net/netfilter/nf_conntrack_acct.h struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp) ct 33 include/net/netfilter/nf_conntrack_acct.h struct net *net = nf_ct_net(ct); ct 36 include/net/netfilter/nf_conntrack_acct.h if (!net->ct.sysctl_acct) ct 39 include/net/netfilter/nf_conntrack_acct.h acct = nf_ct_ext_add(ct, NF_CT_EXT_ACCT, gfp); ct 54 include/net/netfilter/nf_conntrack_acct.h return net->ct.sysctl_acct != 0; ct 64 include/net/netfilter/nf_conntrack_acct.h net->ct.sysctl_acct = enable; ct 58 include/net/netfilter/nf_conntrack_core.h struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb); ct 61 include/net/netfilter/nf_conntrack_core.h if (ct) { ct 62 include/net/netfilter/nf_conntrack_core.h if (!nf_ct_is_confirmed(ct)) ct 65 include/net/netfilter/nf_conntrack_core.h nf_ct_deliver_cached_events(ct); ct 71 include/net/netfilter/nf_conntrack_core.h struct nf_conn *ct, enum ip_conntrack_info ctinfo); ct 32 include/net/netfilter/nf_conntrack_ecache.h nf_ct_ecache_find(const struct nf_conn *ct) ct 35 include/net/netfilter/nf_conntrack_ecache.h return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE); ct 42 include/net/netfilter/nf_conntrack_ecache.h nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp) ct 45 include/net/netfilter/nf_conntrack_ecache.h struct net *net = nf_ct_net(ct); ct 48 
include/net/netfilter/nf_conntrack_ecache.h if (!ctmask && !expmask && net->ct.sysctl_events) { ct 55 include/net/netfilter/nf_conntrack_ecache.h e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp); ct 70 include/net/netfilter/nf_conntrack_ecache.h struct nf_conn *ct; ct 84 include/net/netfilter/nf_conntrack_ecache.h void nf_ct_deliver_cached_events(struct nf_conn *ct); ct 85 include/net/netfilter/nf_conntrack_ecache.h int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct, ct 90 include/net/netfilter/nf_conntrack_ecache.h static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) ct 95 include/net/netfilter/nf_conntrack_ecache.h struct nf_conn *ct, ct 105 include/net/netfilter/nf_conntrack_ecache.h nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) ct 108 include/net/netfilter/nf_conntrack_ecache.h struct net *net = nf_ct_net(ct); ct 111 include/net/netfilter/nf_conntrack_ecache.h if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) ct 114 include/net/netfilter/nf_conntrack_ecache.h e = nf_ct_ecache_find(ct); ct 123 include/net/netfilter/nf_conntrack_ecache.h nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, ct 127 include/net/netfilter/nf_conntrack_ecache.h const struct net *net = nf_ct_net(ct); ct 129 include/net/netfilter/nf_conntrack_ecache.h if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) ct 132 include/net/netfilter/nf_conntrack_ecache.h return nf_conntrack_eventmask_report(1 << event, ct, portid, report); ct 139 include/net/netfilter/nf_conntrack_ecache.h nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) ct 142 include/net/netfilter/nf_conntrack_ecache.h const struct net *net = nf_ct_net(ct); ct 144 include/net/netfilter/nf_conntrack_ecache.h if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) ct 147 include/net/netfilter/nf_conntrack_ecache.h return nf_conntrack_eventmask_report(1 << event, ct, 0, 0); ct 211 include/net/netfilter/nf_conntrack_ecache.h if (!delayed_work_pending(&net->ct.ecache_dwork)) { ct 212 include/net/netfilter/nf_conntrack_ecache.h schedule_delayed_work(&net->ct.ecache_dwork, HZ); ct 213 include/net/netfilter/nf_conntrack_ecache.h net->ct.ecache_dwork_pending = true; ct 221 include/net/netfilter/nf_conntrack_ecache.h if (net->ct.ecache_dwork_pending) { ct 222 include/net/netfilter/nf_conntrack_ecache.h net->ct.ecache_dwork_pending = false; ct 223 include/net/netfilter/nf_conntrack_ecache.h mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0); ct 112 include/net/netfilter/nf_conntrack_expect.h void nf_ct_remove_expectations(struct nf_conn *ct); ct 57 include/net/netfilter/nf_conntrack_extend.h static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id) ct 59 include/net/netfilter/nf_conntrack_extend.h return (ct->ext && __nf_ct_ext_exist(ct->ext, id)); ct 62 include/net/netfilter/nf_conntrack_extend.h static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id) ct 64 include/net/netfilter/nf_conntrack_extend.h if (!nf_ct_ext_exist(ct, id)) ct 67 include/net/netfilter/nf_conntrack_extend.h return (void *)ct->ext + ct->ext->offset[id]; ct 73 include/net/netfilter/nf_conntrack_extend.h void nf_ct_ext_destroy(struct nf_conn *ct); ct 78 include/net/netfilter/nf_conntrack_extend.h static inline void nf_ct_ext_free(struct nf_conn *ct) ct 80 include/net/netfilter/nf_conntrack_extend.h if (ct->ext) ct 81 include/net/netfilter/nf_conntrack_extend.h kfree_rcu(ct->ext, rcu); ct 85 include/net/netfilter/nf_conntrack_extend.h void 
*nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp); ct 89 include/net/netfilter/nf_conntrack_extend.h void (*destroy)(struct nf_conn *ct); ct 47 include/net/netfilter/nf_conntrack_helper.h struct nf_conn *ct, ct 50 include/net/netfilter/nf_conntrack_helper.h void (*destroy)(struct nf_conn *ct); ct 52 include/net/netfilter/nf_conntrack_helper.h int (*from_nlattr)(struct nlattr *attr, struct nf_conn *ct); ct 53 include/net/netfilter/nf_conntrack_helper.h int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct); ct 100 include/net/netfilter/nf_conntrack_helper.h struct nf_conn *ct, ct 103 include/net/netfilter/nf_conntrack_helper.h struct nf_conn *ct), ct 113 include/net/netfilter/nf_conntrack_helper.h struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp); ct 115 include/net/netfilter/nf_conntrack_helper.h int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, ct 118 include/net/netfilter/nf_conntrack_helper.h void nf_ct_helper_destroy(struct nf_conn *ct); ct 120 include/net/netfilter/nf_conntrack_helper.h static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct) ct 122 include/net/netfilter/nf_conntrack_helper.h return nf_ct_ext_find(ct, NF_CT_EXT_HELPER); ct 125 include/net/netfilter/nf_conntrack_helper.h static inline void *nfct_help_data(const struct nf_conn *ct) ct 129 include/net/netfilter/nf_conntrack_helper.h help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER); ct 139 include/net/netfilter/nf_conntrack_helper.h int nf_conntrack_broadcast_help(struct sk_buff *skb, struct nf_conn *ct, ct 146 include/net/netfilter/nf_conntrack_helper.h void (*expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp); ct 150 include/net/netfilter/nf_conntrack_helper.h void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, ct 31 include/net/netfilter/nf_conntrack_l4proto.h bool (*can_early_drop)(const struct nf_conn *ct); ct 35 include/net/netfilter/nf_conntrack_l4proto.h struct nf_conn *ct); ct 38 include/net/netfilter/nf_conntrack_l4proto.h int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); ct 94 include/net/netfilter/nf_conntrack_l4proto.h int nf_conntrack_icmp_packet(struct nf_conn *ct, ct 99 include/net/netfilter/nf_conntrack_l4proto.h int nf_conntrack_icmpv6_packet(struct nf_conn *ct, ct 104 include/net/netfilter/nf_conntrack_l4proto.h int nf_conntrack_udp_packet(struct nf_conn *ct, ct 109 include/net/netfilter/nf_conntrack_l4proto.h int nf_conntrack_udplite_packet(struct nf_conn *ct, ct 114 include/net/netfilter/nf_conntrack_l4proto.h int nf_conntrack_tcp_packet(struct nf_conn *ct, ct 119 include/net/netfilter/nf_conntrack_l4proto.h int nf_conntrack_dccp_packet(struct nf_conn *ct, ct 124 include/net/netfilter/nf_conntrack_l4proto.h int nf_conntrack_sctp_packet(struct nf_conn *ct, ct 129 include/net/netfilter/nf_conntrack_l4proto.h int nf_conntrack_gre_packet(struct nf_conn *ct, ct 162 include/net/netfilter/nf_conntrack_l4proto.h const struct nf_conn *ct, ct 175 include/net/netfilter/nf_conntrack_l4proto.h const struct nf_conn *ct, ct 182 include/net/netfilter/nf_conntrack_l4proto.h return &net->ct.nf_ct_proto.generic; ct 187 include/net/netfilter/nf_conntrack_l4proto.h return &net->ct.nf_ct_proto.tcp; ct 192 include/net/netfilter/nf_conntrack_l4proto.h return &net->ct.nf_ct_proto.udp; ct 197 include/net/netfilter/nf_conntrack_l4proto.h return &net->ct.nf_ct_proto.icmp; ct 202 include/net/netfilter/nf_conntrack_l4proto.h return &net->ct.nf_ct_proto.icmpv6; ct 209 include/net/netfilter/nf_conntrack_l4proto.h 
return &net->ct.nf_ct_proto.dccp; ct 216 include/net/netfilter/nf_conntrack_l4proto.h return &net->ct.nf_ct_proto.sctp; ct 223 include/net/netfilter/nf_conntrack_l4proto.h return &net->ct.nf_ct_proto.gre; ct 20 include/net/netfilter/nf_conntrack_labels.h static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct) ct 23 include/net/netfilter/nf_conntrack_labels.h return nf_ct_ext_find(ct, NF_CT_EXT_LABELS); ct 29 include/net/netfilter/nf_conntrack_labels.h static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) ct 32 include/net/netfilter/nf_conntrack_labels.h struct net *net = nf_ct_net(ct); ct 34 include/net/netfilter/nf_conntrack_labels.h if (net->ct.labels_used == 0) ct 37 include/net/netfilter/nf_conntrack_labels.h return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC); ct 43 include/net/netfilter/nf_conntrack_labels.h int nf_connlabels_replace(struct nf_conn *ct, ct 24 include/net/netfilter/nf_conntrack_seqadj.h static inline struct nf_conn_seqadj *nfct_seqadj(const struct nf_conn *ct) ct 26 include/net/netfilter/nf_conntrack_seqadj.h return nf_ct_ext_find(ct, NF_CT_EXT_SEQADJ); ct 29 include/net/netfilter/nf_conntrack_seqadj.h static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct) ct 31 include/net/netfilter/nf_conntrack_seqadj.h return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC); ct 34 include/net/netfilter/nf_conntrack_seqadj.h int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 36 include/net/netfilter/nf_conntrack_seqadj.h int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 38 include/net/netfilter/nf_conntrack_seqadj.h void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct, ct 41 include/net/netfilter/nf_conntrack_seqadj.h int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, ct 43 include/net/netfilter/nf_conntrack_seqadj.h s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq); ct 14 include/net/netfilter/nf_conntrack_synproxy.h static inline struct nf_conn_synproxy *nfct_synproxy(const struct nf_conn *ct) ct 17 include/net/netfilter/nf_conntrack_synproxy.h return nf_ct_ext_find(ct, NF_CT_EXT_SYNPROXY); ct 23 include/net/netfilter/nf_conntrack_synproxy.h static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct) ct 26 include/net/netfilter/nf_conntrack_synproxy.h return nf_ct_ext_add(ct, NF_CT_EXT_SYNPROXY, GFP_ATOMIC); ct 32 include/net/netfilter/nf_conntrack_synproxy.h static inline bool nf_ct_add_synproxy(struct nf_conn *ct, ct 37 include/net/netfilter/nf_conntrack_synproxy.h if (!nfct_seqadj_ext_add(ct)) ct 40 include/net/netfilter/nf_conntrack_synproxy.h if (!nfct_synproxy_ext_add(ct)) ct 49 include/net/netfilter/nf_conntrack_timeout.h struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct) ct 52 include/net/netfilter/nf_conntrack_timeout.h return nf_ct_ext_find(ct, NF_CT_EXT_TIMEOUT); ct 59 include/net/netfilter/nf_conntrack_timeout.h struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct, ct 66 include/net/netfilter/nf_conntrack_timeout.h timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp); ct 78 include/net/netfilter/nf_conntrack_timeout.h static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct) ct 84 include/net/netfilter/nf_conntrack_timeout.h timeout_ext = nf_ct_timeout_find(ct); ct 95 include/net/netfilter/nf_conntrack_timeout.h int nf_ct_set_timeout(struct net *net, struct nf_conn *ct, u8 l3num, u8 l4num, ct 97 
include/net/netfilter/nf_conntrack_timeout.h void nf_ct_destroy_timeout(struct nf_conn *ct); ct 109 include/net/netfilter/nf_conntrack_timeout.h static inline int nf_ct_set_timeout(struct net *net, struct nf_conn *ct, ct 116 include/net/netfilter/nf_conntrack_timeout.h static inline void nf_ct_destroy_timeout(struct nf_conn *ct) ct 17 include/net/netfilter/nf_conntrack_timestamp.h struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct) ct 20 include/net/netfilter/nf_conntrack_timestamp.h return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP); ct 27 include/net/netfilter/nf_conntrack_timestamp.h struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp) ct 30 include/net/netfilter/nf_conntrack_timestamp.h struct net *net = nf_ct_net(ct); ct 32 include/net/netfilter/nf_conntrack_timestamp.h if (!net->ct.sysctl_tstamp) ct 35 include/net/netfilter/nf_conntrack_timestamp.h return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp); ct 9 include/net/netfilter/nf_conntrack_zones.h nf_ct_zone(const struct nf_conn *ct) ct 12 include/net/netfilter/nf_conntrack_zones.h return &ct->zone; ct 42 include/net/netfilter/nf_conntrack_zones.h static inline void nf_ct_zone_add(struct nf_conn *ct, ct 46 include/net/netfilter/nf_conntrack_zones.h ct->zone = *zone; ct 89 include/net/netfilter/nf_flow_table.h struct flow_offload *flow_offload_alloc(struct nf_conn *ct, ct 39 include/net/netfilter/nf_nat.h unsigned int nf_nat_setup_info(struct nf_conn *ct, ct 43 include/net/netfilter/nf_nat.h extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct, ct 46 include/net/netfilter/nf_nat.h struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct); ct 48 include/net/netfilter/nf_nat.h static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct) ct 51 include/net/netfilter/nf_nat.h return nf_ct_ext_find(ct, NF_CT_EXT_NAT); ct 76 include/net/netfilter/nf_nat.h unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 79 include/net/netfilter/nf_nat.h unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct, ct 86 include/net/netfilter/nf_nat.h int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct, ct 90 include/net/netfilter/nf_nat.h int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, ct 109 include/net/netfilter/nf_nat.h static inline int nf_nat_initialized(struct nf_conn *ct, ct 113 include/net/netfilter/nf_nat.h return ct->status & IPS_SRC_NAT_DONE; ct 115 include/net/netfilter/nf_nat.h return ct->status & IPS_DST_NAT_DONE; ct 11 include/net/netfilter/nf_nat_helper.h bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct, ct 18 include/net/netfilter/nf_nat_helper.h struct nf_conn *ct, ct 26 include/net/netfilter/nf_nat_helper.h return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, ct 31 include/net/netfilter/nf_nat_helper.h bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct, ct 39 include/net/netfilter/nf_nat_helper.h void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this); ct 97 include/scsi/fc_encode.h struct fc_ct_req *ct; ct 101 include/scsi/fc_encode.h ct = fc_frame_payload_get(fp, ct_plen); ct 102 include/scsi/fc_encode.h memset(ct, 0, ct_plen); ct 103 include/scsi/fc_encode.h ct->hdr.ct_rev = FC_CT_REV; ct 104 include/scsi/fc_encode.h ct->hdr.ct_fs_type = fs_type; ct 105 include/scsi/fc_encode.h ct->hdr.ct_fs_subtype = subtype; ct 106 include/scsi/fc_encode.h ct->hdr.ct_cmd = htons((u16) op); ct 107 include/scsi/fc_encode.h return ct; ct 124 
include/scsi/fc_encode.h struct fc_ct_req *ct; ct 129 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft), ct 131 include/scsi/fc_encode.h ct->payload.gid.fn_fc4_type = FC_TYPE_FCP; ct 135 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid), ct 137 include/scsi/fc_encode.h ct->payload.gid.fn_fc4_type = FC_TYPE_FCP; ct 138 include/scsi/fc_encode.h hton24(ct->payload.fid.fp_fid, fc_id); ct 142 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft), ct 144 include/scsi/fc_encode.h hton24(ct->payload.rft.fid.fp_fid, lport->port_id); ct 145 include/scsi/fc_encode.h ct->payload.rft.fts = lport->fcts; ct 149 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id), ct 151 include/scsi/fc_encode.h hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id); ct 152 include/scsi/fc_encode.h ct->payload.rff.fr_type = FC_TYPE_FCP; ct 154 include/scsi/fc_encode.h ct->payload.rff.fr_feat = FCP_FEAT_INIT; ct 156 include/scsi/fc_encode.h ct->payload.rff.fr_feat |= FCP_FEAT_TARG; ct 160 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id), ct 162 include/scsi/fc_encode.h hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id); ct 163 include/scsi/fc_encode.h put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn); ct 168 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len, ct 170 include/scsi/fc_encode.h hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id); ct 171 include/scsi/fc_encode.h strncpy(ct->payload.spn.fr_name, ct 173 include/scsi/fc_encode.h ct->payload.spn.fr_name_len = len; ct 178 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len, ct 180 include/scsi/fc_encode.h put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn); ct 181 include/scsi/fc_encode.h strncpy(ct->payload.snn.fr_name, ct 183 include/scsi/fc_encode.h ct->payload.snn.fr_name_len = len; ct 208 include/scsi/fc_encode.h struct fc_ct_req *ct; ct 230 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT, ct 234 include/scsi/fc_encode.h put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id); ct 236 include/scsi/fc_encode.h put_unaligned_be32(1, &ct->payload.rhba.port.numport); ct 239 include/scsi/fc_encode.h &ct->payload.rhba.port.port[0].portname); ct 243 include/scsi/fc_encode.h &ct->payload.rhba.hba_attrs.numattrs); ct 244 include/scsi/fc_encode.h hba_attrs = &ct->payload.rhba.hba_attrs; ct 376 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT, ct 381 include/scsi/fc_encode.h &ct->payload.rpa.port.portname); ct 385 include/scsi/fc_encode.h &ct->payload.rpa.hba_attrs.numattrs); ct 387 include/scsi/fc_encode.h hba_attrs = &ct->payload.rpa.hba_attrs; ct 467 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT, ct 471 include/scsi/fc_encode.h &ct->payload.dprt.port.portname); ct 475 include/scsi/fc_encode.h ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT, ct 478 include/scsi/fc_encode.h put_unaligned_be64(lport->wwpn, &ct->payload.dhba.hbaid.id); ct 27 include/uapi/linux/netfilter/xt_CT.h struct nf_conn *ct __attribute__((aligned(8))); ct 39 include/uapi/linux/netfilter/xt_CT.h struct nf_conn *ct __attribute__((aligned(8))); ct 39 kernel/irq/generic-chip.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 43 kernel/irq/generic-chip.c irq_reg_writel(gc, mask, ct->regs.disable); ct 44 kernel/irq/generic-chip.c *ct->mask_cache &= ~mask; ct 58 kernel/irq/generic-chip.c struct irq_chip_type *ct = 
irq_data_get_chip_type(d); ct 62 kernel/irq/generic-chip.c *ct->mask_cache |= mask; ct 63 kernel/irq/generic-chip.c irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); ct 78 kernel/irq/generic-chip.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 82 kernel/irq/generic-chip.c *ct->mask_cache &= ~mask; ct 83 kernel/irq/generic-chip.c irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); ct 98 kernel/irq/generic-chip.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 102 kernel/irq/generic-chip.c irq_reg_writel(gc, mask, ct->regs.enable); ct 103 kernel/irq/generic-chip.c *ct->mask_cache |= mask; ct 114 kernel/irq/generic-chip.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 118 kernel/irq/generic-chip.c irq_reg_writel(gc, mask, ct->regs.ack); ct 130 kernel/irq/generic-chip.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 134 kernel/irq/generic-chip.c irq_reg_writel(gc, mask, ct->regs.ack); ct 153 kernel/irq/generic-chip.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 157 kernel/irq/generic-chip.c irq_reg_writel(gc, mask, ct->regs.disable); ct 158 kernel/irq/generic-chip.c *ct->mask_cache &= ~mask; ct 159 kernel/irq/generic-chip.c irq_reg_writel(gc, mask, ct->regs.ack); ct 170 kernel/irq/generic-chip.c struct irq_chip_type *ct = irq_data_get_chip_type(d); ct 174 kernel/irq/generic-chip.c irq_reg_writel(gc, mask, ct->regs.eoi); ct 256 kernel/irq/generic-chip.c struct irq_chip_type *ct = gc->chip_types; ct 257 kernel/irq/generic-chip.c u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask; ct 262 kernel/irq/generic-chip.c mskptr = &ct[i].mask_cache_priv; ct 263 kernel/irq/generic-chip.c mskreg = ct[i].regs.mask; ct 265 kernel/irq/generic-chip.c ct[i].mask_cache = mskptr; ct 383 kernel/irq/generic-chip.c struct irq_chip_type *ct; ct 400 kernel/irq/generic-chip.c ct = gc->chip_types; ct 401 kernel/irq/generic-chip.c chip = &ct->chip; ct 422 kernel/irq/generic-chip.c irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL); ct 470 kernel/irq/generic-chip.c struct irq_chip_type *ct = gc->chip_types; ct 471 kernel/irq/generic-chip.c struct irq_chip *chip = &ct->chip; ct 496 kernel/irq/generic-chip.c irq_set_chip_and_handler(i, chip, ct->handler); ct 514 kernel/irq/generic-chip.c struct irq_chip_type *ct = gc->chip_types; ct 517 kernel/irq/generic-chip.c for (i = 0; i < gc->num_ct; i++, ct++) { ct 518 kernel/irq/generic-chip.c if (ct->type & type) { ct 519 kernel/irq/generic-chip.c d->chip = &ct->chip; ct 520 kernel/irq/generic-chip.c irq_data_to_desc(d)->handle_irq = ct->handler; ct 583 kernel/irq/generic-chip.c struct irq_chip_type *ct = gc->chip_types; ct 585 kernel/irq/generic-chip.c if (ct->chip.irq_suspend) { ct 589 kernel/irq/generic-chip.c ct->chip.irq_suspend(data); ct 603 kernel/irq/generic-chip.c struct irq_chip_type *ct = gc->chip_types; ct 608 kernel/irq/generic-chip.c if (ct->chip.irq_resume) { ct 612 kernel/irq/generic-chip.c ct->chip.irq_resume(data); ct 626 kernel/irq/generic-chip.c struct irq_chip_type *ct = gc->chip_types; ct 628 kernel/irq/generic-chip.c if (ct->chip.irq_pm_shutdown) { ct 632 kernel/irq/generic-chip.c ct->chip.irq_pm_shutdown(data); ct 330 kernel/time/posix-cpu-timers.c struct task_cputime ct; ct 332 kernel/time/posix-cpu-timers.c thread_group_cputime(tsk, &ct); ct 333 kernel/time/posix-cpu-timers.c store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime); ct 358 lib/string.c int strcmp(const char *cs, const char *ct) ct 364 lib/string.c c2 = *ct++; ct 382 lib/string.c int strncmp(const char *cs, 
const char *ct, size_t count) ct 388 lib/string.c c2 = *ct++; ct 608 lib/string.c char *strpbrk(const char *cs, const char *ct) ct 613 lib/string.c for (sc2 = ct; *sc2 != '\0'; ++sc2) { ct 635 lib/string.c char *strsep(char **s, const char *ct) ct 643 lib/string.c end = strpbrk(sbegin, ct); ct 879 lib/string.c __visible int memcmp(const void *cs, const void *ct, size_t count) ct 884 lib/string.c for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) ct 129 lib/zstd/fse.h FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct); ct 266 lib/zstd/fse.h size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits); ct 269 lib/zstd/fse.h size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue); ct 276 lib/zstd/fse.h size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize); ct 301 lib/zstd/fse.h static void FSE_initCState(FSE_CState_t *CStatePtr, const FSE_CTable *ct); ct 427 lib/zstd/fse.h ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct) ct 429 lib/zstd/fse.h const void *ptr = ct; ct 434 lib/zstd/fse.h statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1)); ct 441 lib/zstd/fse.h ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol) ct 443 lib/zstd/fse.h FSE_initCState(statePtr, ct); ct 92 lib/zstd/fse_compress.c size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize) ct 96 lib/zstd/fse_compress.c void *const ptr = ct; ct 666 lib/zstd/fse_compress.c size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits) ct 671 lib/zstd/fse_compress.c void *const ptr = ct; ct 702 lib/zstd/fse_compress.c size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue) ct 704 lib/zstd/fse_compress.c void *ptr = ct; ct 724 lib/zstd/fse_compress.c static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast) ct 745 lib/zstd/fse_compress.c FSE_initCState2(&CState1, ct, *--ip); ct 746 lib/zstd/fse_compress.c FSE_initCState2(&CState2, ct, *--ip); ct 750 lib/zstd/fse_compress.c FSE_initCState2(&CState2, ct, *--ip); ct 751 lib/zstd/fse_compress.c FSE_initCState2(&CState1, ct, *--ip); ct 785 lib/zstd/fse_compress.c size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct) ct 790 lib/zstd/fse_compress.c return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); ct 792 lib/zstd/fse_compress.c return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); ct 309 net/appletalk/aarp.c int ct; ct 313 net/appletalk/aarp.c for (ct = 0; ct < AARP_HASH_SIZE; ct++) { ct 314 net/appletalk/aarp.c __aarp_expire_timer(&resolved[ct]); ct 315 net/appletalk/aarp.c __aarp_kick(&unresolved[ct]); ct 316 net/appletalk/aarp.c __aarp_expire_timer(&unresolved[ct]); ct 317 net/appletalk/aarp.c __aarp_expire_timer(&proxies[ct]); ct 331 net/appletalk/aarp.c int ct; ct 339 net/appletalk/aarp.c for (ct = 0; ct < AARP_HASH_SIZE; ct++) { ct 340 net/appletalk/aarp.c __aarp_expire_device(&resolved[ct], dev); ct 341 net/appletalk/aarp.c __aarp_expire_device(&unresolved[ct], dev); ct 342 net/appletalk/aarp.c __aarp_expire_device(&proxies[ct], dev); ct 365 net/appletalk/aarp.c int ct; ct 368 
net/appletalk/aarp.c for (ct = 0; ct < AARP_HASH_SIZE; ct++) { ct 369 net/appletalk/aarp.c __aarp_expire_all(&resolved[ct]); ct 370 net/appletalk/aarp.c __aarp_expire_all(&unresolved[ct]); ct 371 net/appletalk/aarp.c __aarp_expire_all(&proxies[ct]); ct 899 net/appletalk/aarp.c int ct; ct 903 net/appletalk/aarp.c for (ct = 0; ct < AARP_HASH_SIZE; ct++) { ct 904 net/appletalk/aarp.c __aarp_expire_device(&resolved[ct], dev); ct 905 net/appletalk/aarp.c __aarp_expire_device(&unresolved[ct], dev); ct 906 net/appletalk/aarp.c __aarp_expire_device(&proxies[ct], dev); ct 921 net/appletalk/aarp.c int ct = iter->bucket; ct 927 net/appletalk/aarp.c while (ct < AARP_HASH_SIZE) { ct 928 net/appletalk/aarp.c for (entry = table[ct]; entry; entry = entry->next) { ct 931 net/appletalk/aarp.c iter->bucket = ct; ct 935 net/appletalk/aarp.c ++ct; ct 939 net/appletalk/aarp.c ct = 0; ct 944 net/appletalk/aarp.c ct = 0; ct 663 net/appletalk/ddp.c int ct; ct 770 net/appletalk/ddp.c for (ct = ntohs(nr->nr_firstnet); ct 771 net/appletalk/ddp.c ct <= limit; ct++) { ct 772 net/appletalk/ddp.c sa->sat_addr.s_net = htons(ct); ct 1130 net/ax25/af_ax25.c int ct = 0, err = 0; ct 1203 net/ax25/af_ax25.c while (ct < fsa->fsa_ax25.sax25_ndigis) { ct 1204 net/ax25/af_ax25.c if ((fsa->fsa_digipeater[ct].ax25_call[6] & ct 1206 net/ax25/af_ax25.c digi->repeated[ct] = 1; ct 1207 net/ax25/af_ax25.c digi->lastrepeat = ct; ct 1209 net/ax25/af_ax25.c digi->repeated[ct] = 0; ct 1211 net/ax25/af_ax25.c digi->calls[ct] = fsa->fsa_digipeater[ct]; ct 1212 net/ax25/af_ax25.c ct++; ct 1506 net/ax25/af_ax25.c int ct = 0; ct 1517 net/ax25/af_ax25.c while (ct < usax->sax25_ndigis) { ct 1518 net/ax25/af_ax25.c dtmp.repeated[ct] = 0; ct 1519 net/ax25/af_ax25.c dtmp.calls[ct] = fsa->fsa_digipeater[ct]; ct 1520 net/ax25/af_ax25.c ct++; ct 1666 net/ax25/af_ax25.c int ct; ct 1669 net/ax25/af_ax25.c for (ct = 0; ct < digi.ndigi; ct++) ct 1670 net/ax25/af_ax25.c fsa->fsa_digipeater[ct] = digi.calls[ct]; ct 116 net/ax25/ax25_addr.c int ct = 0; ct 118 net/ax25/ax25_addr.c while (ct < 6) { ct 119 net/ax25/ax25_addr.c if ((a->ax25_call[ct] & 0xFE) != (b->ax25_call[ct] & 0xFE)) /* Clean off repeater bits */ ct 121 net/ax25/ax25_addr.c ct++; ct 124 net/ax25/ax25_addr.c if ((a->ax25_call[ct] & 0x1E) == (b->ax25_call[ct] & 0x1E)) /* SSID without control bit */ ct 219 net/ax25/ax25_addr.c int ct = 0; ct 252 net/ax25/ax25_addr.c while (ct < d->ndigi) { ct 253 net/ax25/ax25_addr.c memcpy(buf, &d->calls[ct], AX25_ADDR_LEN); ct 255 net/ax25/ax25_addr.c if (d->repeated[ct]) ct 265 net/ax25/ax25_addr.c ct++; ct 286 net/ax25/ax25_addr.c int ct; ct 292 net/ax25/ax25_addr.c for (ct = 0; ct < in->ndigi; ct++) { ct 293 net/ax25/ax25_addr.c out->calls[ct] = in->calls[in->ndigi - ct - 1]; ct 295 net/ax25/ax25_addr.c if (ct <= out->lastrepeat) { ct 296 net/ax25/ax25_addr.c out->calls[ct].ax25_call[6] |= AX25_HBIT; ct 297 net/ax25/ax25_addr.c out->repeated[ct] = 1; ct 299 net/ax25/ax25_addr.c out->calls[ct].ax25_call[6] &= ~AX25_HBIT; ct 300 net/ax25/ax25_addr.c out->repeated[ct] = 0; ct 144 net/bridge/netfilter/nf_conntrack_bridge.c const struct nf_conn *ct; ct 150 net/bridge/netfilter/nf_conntrack_bridge.c ct = nf_ct_get(skb, &ctinfo); ct 151 net/bridge/netfilter/nf_conntrack_bridge.c if (ct) ct 152 net/bridge/netfilter/nf_conntrack_bridge.c zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo)); ct 174 net/bridge/netfilter/nf_conntrack_bridge.c const struct nf_conn *ct; ct 177 net/bridge/netfilter/nf_conntrack_bridge.c ct = nf_ct_get(skb, &ctinfo); ct 178 
net/bridge/netfilter/nf_conntrack_bridge.c if (ct) ct 179 net/bridge/netfilter/nf_conntrack_bridge.c zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo)); ct 234 net/bridge/netfilter/nf_conntrack_bridge.c struct nf_conn *ct; ct 238 net/bridge/netfilter/nf_conntrack_bridge.c ct = nf_ct_get(skb, &ctinfo); ct 239 net/bridge/netfilter/nf_conntrack_bridge.c if ((ct && !nf_ct_is_template(ct)) || ct 361 net/bridge/netfilter/nf_conntrack_bridge.c struct nf_conn *ct; ct 364 net/bridge/netfilter/nf_conntrack_bridge.c ct = nf_ct_get(skb, &ctinfo); ct 365 net/bridge/netfilter/nf_conntrack_bridge.c if (!ct || ctinfo == IP_CT_RELATED_REPLY) ct 385 net/bridge/netfilter/nf_conntrack_bridge.c return nf_confirm(skb, protoff, ct, ctinfo); ct 265 net/core/flow_dissector.c struct nf_conn *ct; ct 270 net/core/flow_dissector.c ct = nf_ct_get(skb, &ctinfo); ct 271 net/core/flow_dissector.c if (!ct) ct 281 net/core/flow_dissector.c key->ct_zone = ct->zone.id; ct 284 net/core/flow_dissector.c key->ct_mark = ct->mark; ct 287 net/core/flow_dissector.c cl = nf_ct_labels_find(ct); ct 1750 net/ipv4/ipmr.c int ct; ct 1757 net/ipv4/ipmr.c for (ct = 0; ct < mrt->maxvif; ct++, v++) { ct 1759 net/ipv4/ipmr.c vif_delete(mrt, ct, 1, NULL); ct 1940 net/ipv4/ipmr.c int ct; ct 1942 net/ipv4/ipmr.c for (ct = mrt->maxvif-1; ct >= 0; ct--) { ct 1943 net/ipv4/ipmr.c if (mrt->vif_table[ct].dev == dev) ct 1946 net/ipv4/ipmr.c return ct; ct 1956 net/ipv4/ipmr.c int vif, ct; ct 2034 net/ipv4/ipmr.c for (ct = c->_c.mfc_un.res.maxvif - 1; ct 2035 net/ipv4/ipmr.c ct >= c->_c.mfc_un.res.minvif; ct--) { ct 2038 net/ipv4/ipmr.c ct != true_vifi) && ct 2039 net/ipv4/ipmr.c ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) { ct 2047 net/ipv4/ipmr.c psend = ct; ct 118 net/ipv4/ipmr_base.c for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { ct 119 net/ipv4/ipmr_base.c if (!VIF_EXISTS(mrt, iter->ct)) ct 122 net/ipv4/ipmr_base.c return &mrt->vif_table[iter->ct]; ct 138 net/ipv4/ipmr_base.c while (++iter->ct < mrt->maxvif) { ct 139 net/ipv4/ipmr_base.c if (!VIF_EXISTS(mrt, iter->ct)) ct 141 net/ipv4/ipmr_base.c return &mrt->vif_table[iter->ct]; ct 215 net/ipv4/ipmr_base.c int ct; ct 235 net/ipv4/ipmr_base.c for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { ct 236 net/ipv4/ipmr_base.c if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { ct 246 net/ipv4/ipmr_base.c nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; ct 247 net/ipv4/ipmr_base.c vif = &mrt->vif_table[ct]; ct 275 net/ipv4/ipmr_base.c int ct; ct 277 net/ipv4/ipmr_base.c for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { ct 278 net/ipv4/ipmr_base.c if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { ct 281 net/ipv4/ipmr_base.c vif = &mrt->vif_table[ct]; ct 400 net/ipv4/netfilter/ipt_CLUSTERIP.c struct nf_conn *ct; ct 408 net/ipv4/netfilter/ipt_CLUSTERIP.c ct = nf_ct_get(skb, &ctinfo); ct 409 net/ipv4/netfilter/ipt_CLUSTERIP.c if (ct == NULL) ct 427 net/ipv4/netfilter/ipt_CLUSTERIP.c ct->mark = hash; ct 442 net/ipv4/netfilter/ipt_CLUSTERIP.c nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); ct 444 net/ipv4/netfilter/ipt_CLUSTERIP.c pr_debug("hash=%u ct_hash=%u ", hash, ct->mark); ct 47 net/ipv4/netfilter/nf_defrag_ipv4.c const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 49 net/ipv4/netfilter/nf_defrag_ipv4.c zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo)); ct 28 net/ipv4/netfilter/nf_nat_h323.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 41 net/ipv4/netfilter/nf_nat_h323.c if (!nf_nat_mangle_tcp_packet(skb, ct, 
ctinfo, ct 55 net/ipv4/netfilter/nf_nat_h323.c if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, ct 92 net/ipv4/netfilter/nf_nat_h323.c static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, ct 97 net/ipv4/netfilter/nf_nat_h323.c const struct nf_ct_h323_master *info = nfct_help_data(ct); ct 104 net/ipv4/netfilter/nf_nat_h323.c if (get_h225_addr(ct, *data, &taddr[i], &addr, &port)) { ct 105 net/ipv4/netfilter/nf_nat_h323.c if (addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && ct 111 net/ipv4/netfilter/nf_nat_h323.c get_h225_addr(ct, *data, &taddr[0], ct 118 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.dst.u3.ip, ct 122 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir]. ct 125 net/ipv4/netfilter/nf_nat_h323.c } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && ct 130 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.src.u3.ip, ct 134 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir]. ct 145 net/ipv4/netfilter/nf_nat_h323.c static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct, ct 156 net/ipv4/netfilter/nf_nat_h323.c if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && ct 157 net/ipv4/netfilter/nf_nat_h323.c addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && ct 158 net/ipv4/netfilter/nf_nat_h323.c port == ct->tuplehash[dir].tuple.src.u.udp.port) { ct 161 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.dst.u3.ip, ct 162 net/ipv4/netfilter/nf_nat_h323.c ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); ct 164 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.dst.u3, ct 165 net/ipv4/netfilter/nf_nat_h323.c ct->tuplehash[!dir].tuple. ct 174 net/ipv4/netfilter/nf_nat_h323.c static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, ct 182 net/ipv4/netfilter/nf_nat_h323.c struct nf_ct_h323_master *info = nfct_help_data(ct); ct 252 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.dst.u3, ct 280 net/ipv4/netfilter/nf_nat_h323.c static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, ct 315 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.dst.u3, ct 331 net/ipv4/netfilter/nf_nat_h323.c static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, ct 337 net/ipv4/netfilter/nf_nat_h323.c struct nf_ct_h323_master *info = nfct_help_data(ct); ct 371 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.dst.u3, ct 422 net/ipv4/netfilter/nf_nat_h323.c static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, ct 428 net/ipv4/netfilter/nf_nat_h323.c struct nf_ct_h323_master *info = nfct_help_data(ct); ct 463 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.dst.u3, ct 475 net/ipv4/netfilter/nf_nat_h323.c get_h225_addr(ct, *data, &taddr[0], &addr, &port) && ct 478 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.dst.u3, ct 518 net/ipv4/netfilter/nf_nat_h323.c static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, ct 530 net/ipv4/netfilter/nf_nat_h323.c exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip; ct 556 net/ipv4/netfilter/nf_nat_h323.c &ct->tuplehash[!dir].tuple.dst.u3, ct 43 net/ipv4/netfilter/nf_nat_pptp.c static void pptp_nat_expected(struct nf_conn *ct, ct 46 net/ipv4/netfilter/nf_nat_pptp.c struct net *net = nf_ct_net(ct); ct 47 net/ipv4/netfilter/nf_nat_pptp.c const struct nf_conn *master = ct->master; ct 55 net/ipv4/netfilter/nf_nat_pptp.c nat = nf_ct_nat_ext_add(ct); ct 85 net/ipv4/netfilter/nf_nat_pptp.c other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t); ct 95 net/ipv4/netfilter/nf_nat_pptp.c BUG_ON(ct->status & 
IPS_NAT_DONE_MASK); ct 100 net/ipv4/netfilter/nf_nat_pptp.c = ct->master->tuplehash[!exp->dir].tuple.dst.u3; ct 105 net/ipv4/netfilter/nf_nat_pptp.c nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); ct 110 net/ipv4/netfilter/nf_nat_pptp.c = ct->master->tuplehash[!exp->dir].tuple.src.u3; ct 115 net/ipv4/netfilter/nf_nat_pptp.c nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); ct 121 net/ipv4/netfilter/nf_nat_pptp.c struct nf_conn *ct, ct 129 net/ipv4/netfilter/nf_nat_pptp.c struct nf_conn_nat *nat = nfct_nat(ct); ct 139 net/ipv4/netfilter/nf_nat_pptp.c ct_pptp_info = nfct_help_data(ct); ct 156 net/ipv4/netfilter/nf_nat_pptp.c new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port; ct 189 net/ipv4/netfilter/nf_nat_pptp.c if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, ct 202 net/ipv4/netfilter/nf_nat_pptp.c const struct nf_conn *ct = expect_orig->master; ct 203 net/ipv4/netfilter/nf_nat_pptp.c struct nf_conn_nat *nat = nfct_nat(ct); ct 211 net/ipv4/netfilter/nf_nat_pptp.c ct_pptp_info = nfct_help_data(ct); ct 232 net/ipv4/netfilter/nf_nat_pptp.c struct nf_conn *ct, ct 239 net/ipv4/netfilter/nf_nat_pptp.c struct nf_conn_nat *nat = nfct_nat(ct); ct 289 net/ipv4/netfilter/nf_nat_pptp.c if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, ct 126 net/ipv4/netfilter/nf_nat_snmp_basic_main.c static int snmp_translate(struct nf_conn *ct, int dir, struct sk_buff *skb) ct 136 net/ipv4/netfilter/nf_nat_snmp_basic_main.c ctx.from = ct->tuplehash[dir].tuple.src.u3.ip; ct 137 net/ipv4/netfilter/nf_nat_snmp_basic_main.c ctx.to = ct->tuplehash[!dir].tuple.dst.u3.ip; ct 139 net/ipv4/netfilter/nf_nat_snmp_basic_main.c ctx.from = ct->tuplehash[!dir].tuple.src.u3.ip; ct 140 net/ipv4/netfilter/nf_nat_snmp_basic_main.c ctx.to = ct->tuplehash[dir].tuple.dst.u3.ip; ct 150 net/ipv4/netfilter/nf_nat_snmp_basic_main.c nf_ct_helper_log(skb, ct, "parser failed\n"); ct 161 net/ipv4/netfilter/nf_nat_snmp_basic_main.c struct nf_conn *ct, ct 176 net/ipv4/netfilter/nf_nat_snmp_basic_main.c if (!(ct->status & IPS_NAT_MASK)) ct 185 net/ipv4/netfilter/nf_nat_snmp_basic_main.c nf_ct_helper_log(skb, ct, "dropping malformed packet\n"); ct 190 net/ipv4/netfilter/nf_nat_snmp_basic_main.c nf_ct_helper_log(skb, ct, "cannot mangle packet"); ct 195 net/ipv4/netfilter/nf_nat_snmp_basic_main.c ret = snmp_translate(ct, dir, skb); ct 102 net/ipv4/netfilter/nf_socket_ipv4.c struct nf_conn const *ct; ct 139 net/ipv4/netfilter/nf_socket_ipv4.c ct = nf_ct_get(skb, &ctinfo); ct 140 net/ipv4/netfilter/nf_socket_ipv4.c if (ct && ct 145 net/ipv4/netfilter/nf_socket_ipv4.c (ct->status & IPS_SRC_NAT_DONE)) { ct 147 net/ipv4/netfilter/nf_socket_ipv4.c daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; ct 149 net/ipv4/netfilter/nf_socket_ipv4.c ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port : ct 150 net/ipv4/netfilter/nf_socket_ipv4.c ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; ct 1236 net/ipv6/ip6mr.c int ct; ct 1243 net/ipv6/ip6mr.c for (ct = 0; ct < mrt->maxvif; ct++, v++) { ct 1245 net/ipv6/ip6mr.c mif6_delete(mrt, ct, 1, NULL); ct 2072 net/ipv6/ip6mr.c int ct; ct 2074 net/ipv6/ip6mr.c for (ct = mrt->maxvif - 1; ct >= 0; ct--) { ct 2075 net/ipv6/ip6mr.c if (mrt->vif_table[ct].dev == dev) ct 2078 net/ipv6/ip6mr.c return ct; ct 2086 net/ipv6/ip6mr.c int vif, ct; ct 2155 net/ipv6/ip6mr.c for (ct = c->_c.mfc_un.res.maxvif - 1; ct 2156 net/ipv6/ip6mr.c ct >= c->_c.mfc_un.res.minvif; ct--) { ct 2158 net/ipv6/ip6mr.c if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) && ct 2159 net/ipv6/ip6mr.c 
ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) { ct 2165 net/ipv6/ip6mr.c psend = ct; ct 37 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 39 net/ipv6/netfilter/nf_defrag_ipv6_hooks.c zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo)); ct 804 net/mac80211/mesh.c enum nl80211_channel_type ct; ct 840 net/mac80211/mesh.c ct = cfg80211_get_chandef_type(&csa->settings.chandef); ct 841 net/mac80211/mesh.c if (ct == NL80211_CHAN_HT40PLUS) ct 755 net/netfilter/ipvs/ip_vs_conn.c int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest) ct 757 net/netfilter/ipvs/ip_vs_conn.c struct ip_vs_dest *dest = ct->dest; ct 758 net/netfilter/ipvs/ip_vs_conn.c struct netns_ipvs *ipvs = ct->ipvs; ct 770 net/netfilter/ipvs/ip_vs_conn.c ip_vs_proto_name(ct->protocol), ct 771 net/netfilter/ipvs/ip_vs_conn.c IP_VS_DBG_ADDR(ct->af, &ct->caddr), ct 772 net/netfilter/ipvs/ip_vs_conn.c ntohs(ct->cport), ct 773 net/netfilter/ipvs/ip_vs_conn.c IP_VS_DBG_ADDR(ct->af, &ct->vaddr), ct 774 net/netfilter/ipvs/ip_vs_conn.c ntohs(ct->vport), ct 775 net/netfilter/ipvs/ip_vs_conn.c IP_VS_DBG_ADDR(ct->daf, &ct->daddr), ct 776 net/netfilter/ipvs/ip_vs_conn.c ntohs(ct->dport)); ct 781 net/netfilter/ipvs/ip_vs_conn.c if (ct->vport != htons(0xffff)) { ct 782 net/netfilter/ipvs/ip_vs_conn.c if (ip_vs_conn_unhash(ct)) { ct 783 net/netfilter/ipvs/ip_vs_conn.c ct->dport = htons(0xffff); ct 784 net/netfilter/ipvs/ip_vs_conn.c ct->vport = htons(0xffff); ct 785 net/netfilter/ipvs/ip_vs_conn.c ct->cport = 0; ct 786 net/netfilter/ipvs/ip_vs_conn.c ip_vs_conn_hash(ct); ct 794 net/netfilter/ipvs/ip_vs_conn.c __ip_vs_conn_put(ct); ct 823 net/netfilter/ipvs/ip_vs_conn.c struct ip_vs_conn *ct = cp->control; ct 829 net/netfilter/ipvs/ip_vs_conn.c if (ct) { ct 832 net/netfilter/ipvs/ip_vs_conn.c if (!cp->timeout && !atomic_read(&ct->n_control) && ct 833 net/netfilter/ipvs/ip_vs_conn.c (!(ct->flags & IP_VS_CONN_F_TEMPLATE) || ct 834 net/netfilter/ipvs/ip_vs_conn.c !(ct->state & IP_VS_CTPL_S_ASSURED))) { ct 836 net/netfilter/ipvs/ip_vs_conn.c ct->timeout = 0; ct 837 net/netfilter/ipvs/ip_vs_conn.c ip_vs_conn_expire_now(ct); ct 271 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_conn *ct; ct 352 net/netfilter/ipvs/ip_vs_core.c ct = ip_vs_ct_in_get(&param); ct 353 net/netfilter/ipvs/ip_vs_core.c if (!ct || !ip_vs_check_template(ct, NULL)) { ct 383 net/netfilter/ipvs/ip_vs_core.c ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, ct 385 net/netfilter/ipvs/ip_vs_core.c if (ct == NULL) { ct 391 net/netfilter/ipvs/ip_vs_core.c ct->timeout = svc->timeout; ct 394 net/netfilter/ipvs/ip_vs_core.c dest = ct->dest; ct 415 net/netfilter/ipvs/ip_vs_core.c ip_vs_conn_put(ct); ct 423 net/netfilter/ipvs/ip_vs_core.c ip_vs_control_add(cp, ct); ct 424 net/netfilter/ipvs/ip_vs_core.c ip_vs_conn_put(ct); ct 1156 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_conn *ct = NULL, *cp = NULL; ct 1189 net/netfilter/ipvs/ip_vs_core.c ct = ip_vs_ct_in_get(&param); ct 1191 net/netfilter/ipvs/ip_vs_core.c if (!ct || !ip_vs_check_template(ct, dest)) { ct 1192 net/netfilter/ipvs/ip_vs_core.c ct = ip_vs_conn_new(&param, dest->af, daddr, dport, ct 1194 net/netfilter/ipvs/ip_vs_core.c if (!ct) { ct 1198 net/netfilter/ipvs/ip_vs_core.c ct->timeout = svc->timeout; ct 1212 net/netfilter/ipvs/ip_vs_core.c if (ct) ct 1213 net/netfilter/ipvs/ip_vs_core.c ip_vs_conn_put(ct); ct 1216 net/netfilter/ipvs/ip_vs_core.c if (ct) { ct 1217 net/netfilter/ipvs/ip_vs_core.c ip_vs_control_add(cp, ct); ct 1218 net/netfilter/ipvs/ip_vs_core.c
ip_vs_conn_put(ct); ct 261 net/netfilter/ipvs/ip_vs_ftp.c struct nf_conn *ct; ct 364 net/netfilter/ipvs/ip_vs_ftp.c ct = nf_ct_get(skb, &ctinfo); ct 365 net/netfilter/ipvs/ip_vs_ftp.c if (ct) { ct 374 net/netfilter/ipvs/ip_vs_ftp.c mangled = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, ct 380 net/netfilter/ipvs/ip_vs_ftp.c ip_vs_nfct_expect_related(skb, ct, n_cp, ct 74 net/netfilter/ipvs/ip_vs_nfct.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 77 net/netfilter/ipvs/ip_vs_nfct.c if (ct == NULL || nf_ct_is_confirmed(ct) || ct 78 net/netfilter/ipvs/ip_vs_nfct.c nf_ct_is_dying(ct)) ct 94 net/netfilter/ipvs/ip_vs_nfct.c if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP && ct 95 net/netfilter/ipvs/ip_vs_nfct.c !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct)) ct 104 net/netfilter/ipvs/ip_vs_nfct.c new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 121 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ct->status, ctinfo, ct 122 net/netfilter/ipvs/ip_vs_nfct.c ARG_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple)); ct 125 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ct->status, ctinfo, ct 127 net/netfilter/ipvs/ip_vs_nfct.c nf_conntrack_alter_reply(ct, &new_tuple); ct 129 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ARG_CONN(cp)); ct 140 net/netfilter/ipvs/ip_vs_nfct.c static void ip_vs_nfct_expect_callback(struct nf_conn *ct, ct 146 net/netfilter/ipvs/ip_vs_nfct.c struct net *net = nf_ct_net(ct); ct 157 net/netfilter/ipvs/ip_vs_nfct.c orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 166 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ct->status, ARG_CONN(cp)); ct 167 net/netfilter/ipvs/ip_vs_nfct.c new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 170 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ARG_TUPLE(&new_reply)); ct 182 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ct->status, ARG_CONN(cp)); ct 183 net/netfilter/ipvs/ip_vs_nfct.c new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 186 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ARG_TUPLE(&new_reply)); ct 194 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ct->status, ARG_TUPLE(orig)); ct 200 net/netfilter/ipvs/ip_vs_nfct.c nf_conntrack_alter_reply(ct, &new_reply); ct 211 net/netfilter/ipvs/ip_vs_nfct.c void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct, ct 217 net/netfilter/ipvs/ip_vs_nfct.c if (ct == NULL) ct 220 net/netfilter/ipvs/ip_vs_nfct.c exp = nf_ct_expect_alloc(ct); ct 224 net/netfilter/ipvs/ip_vs_nfct.c nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 233 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ARG_TUPLE(&exp->tuple)); ct 245 net/netfilter/ipvs/ip_vs_nfct.c struct nf_conn *ct; ct 264 net/netfilter/ipvs/ip_vs_nfct.c ct = nf_ct_tuplehash_to_ctrack(h); ct 265 net/netfilter/ipvs/ip_vs_nfct.c if (nf_ct_kill(ct)) { ct 268 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ARG_TUPLE(&tuple)); ct 272 net/netfilter/ipvs/ip_vs_nfct.c __func__, ct, ARG_TUPLE(&tuple)); ct 274 net/netfilter/ipvs/ip_vs_nfct.c nf_ct_put(ct); ct 108 net/netfilter/ipvs/ip_vs_pe_sip.c struct ip_vs_conn *ct) ct 113 net/netfilter/ipvs/ip_vs_pe_sip.c if (ct->af == p->af && ct 114 net/netfilter/ipvs/ip_vs_pe_sip.c ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) && ct 118 net/netfilter/ipvs/ip_vs_pe_sip.c p->vaddr, &ct->vaddr) && ct 119 net/netfilter/ipvs/ip_vs_pe_sip.c ct->vport == p->vport && ct 120 net/netfilter/ipvs/ip_vs_pe_sip.c ct->flags & IP_VS_CONN_F_TEMPLATE && ct 121 net/netfilter/ipvs/ip_vs_pe_sip.c ct->protocol == p->protocol && ct 122 net/netfilter/ipvs/ip_vs_pe_sip.c ct->pe_data && ct->pe_data_len == p->pe_data_len 
&& ct 123 net/netfilter/ipvs/ip_vs_pe_sip.c !memcmp(ct->pe_data, p->pe_data, p->pe_data_len)) ct 799 net/netfilter/ipvs/ip_vs_xmit.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 801 net/netfilter/ipvs/ip_vs_xmit.c if (ct) { ct 887 net/netfilter/ipvs/ip_vs_xmit.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 889 net/netfilter/ipvs/ip_vs_xmit.c if (ct) { ct 1554 net/netfilter/ipvs/ip_vs_xmit.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1556 net/netfilter/ipvs/ip_vs_xmit.c if (ct) { ct 1642 net/netfilter/ipvs/ip_vs_xmit.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1644 net/netfilter/ipvs/ip_vs_xmit.c if (ct) { ct 33 net/netfilter/nf_conntrack_acct.c net->ct.sysctl_acct = nf_ct_acct; ct 91 net/netfilter/nf_conntrack_amanda.c struct nf_conn *ct, ct 109 net/netfilter/nf_conntrack_amanda.c nf_ct_refresh(ct, skb, master_timeout * HZ); ct 146 net/netfilter/nf_conntrack_amanda.c exp = nf_ct_expect_alloc(ct); ct 148 net/netfilter/nf_conntrack_amanda.c nf_ct_helper_log(skb, ct, "cannot alloc expectation"); ct 152 net/netfilter/nf_conntrack_amanda.c tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 154 net/netfilter/nf_conntrack_amanda.c nf_ct_l3num(ct), ct 159 net/netfilter/nf_conntrack_amanda.c if (nf_nat_amanda && ct->status & IPS_NAT_MASK) ct 163 net/netfilter/nf_conntrack_amanda.c nf_ct_helper_log(skb, ct, "cannot add expectation"); ct 19 net/netfilter/nf_conntrack_broadcast.c struct nf_conn *ct, ct 27 net/netfilter/nf_conntrack_broadcast.c struct nf_conn_help *help = nfct_help(ct); ct 31 net/netfilter/nf_conntrack_broadcast.c if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk))) ct 56 net/netfilter/nf_conntrack_broadcast.c exp = nf_ct_expect_alloc(ct); ct 60 net/netfilter/nf_conntrack_broadcast.c exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 74 net/netfilter/nf_conntrack_broadcast.c nf_ct_refresh(ct, skb, timeout * HZ); ct 462 net/netfilter/nf_conntrack_core.c u32 nf_ct_get_id(const struct nf_conn *ct) ct 469 net/netfilter/nf_conntrack_core.c a = (unsigned long)ct; ct 470 net/netfilter/nf_conntrack_core.c b = (unsigned long)ct->master; ct 471 net/netfilter/nf_conntrack_core.c c = (unsigned long)nf_ct_net(ct); ct 472 net/netfilter/nf_conntrack_core.c d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, ct 473 net/netfilter/nf_conntrack_core.c sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple), ct 484 net/netfilter/nf_conntrack_core.c clean_from_lists(struct nf_conn *ct) ct 486 net/netfilter/nf_conntrack_core.c pr_debug("clean_from_lists(%p)\n", ct); ct 487 net/netfilter/nf_conntrack_core.c hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); ct 488 net/netfilter/nf_conntrack_core.c hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); ct 491 net/netfilter/nf_conntrack_core.c nf_ct_remove_expectations(ct); ct 495 net/netfilter/nf_conntrack_core.c static void nf_ct_add_to_dying_list(struct nf_conn *ct) ct 500 net/netfilter/nf_conntrack_core.c ct->cpu = smp_processor_id(); ct 501 net/netfilter/nf_conntrack_core.c pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); ct 504 net/netfilter/nf_conntrack_core.c hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, ct 510 net/netfilter/nf_conntrack_core.c static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct) ct 515 net/netfilter/nf_conntrack_core.c ct->cpu = smp_processor_id(); ct 516 net/netfilter/nf_conntrack_core.c pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); ct 519 net/netfilter/nf_conntrack_core.c 
hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, ct 525 net/netfilter/nf_conntrack_core.c static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct) ct 530 net/netfilter/nf_conntrack_core.c pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); ct 533 net/netfilter/nf_conntrack_core.c BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); ct 534 net/netfilter/nf_conntrack_core.c hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); ct 585 net/netfilter/nf_conntrack_core.c static void destroy_gre_conntrack(struct nf_conn *ct) ct 588 net/netfilter/nf_conntrack_core.c struct nf_conn *master = ct->master; ct 598 net/netfilter/nf_conntrack_core.c struct nf_conn *ct = (struct nf_conn *)nfct; ct 600 net/netfilter/nf_conntrack_core.c pr_debug("destroy_conntrack(%p)\n", ct); ct 603 net/netfilter/nf_conntrack_core.c if (unlikely(nf_ct_is_template(ct))) { ct 604 net/netfilter/nf_conntrack_core.c nf_ct_tmpl_free(ct); ct 608 net/netfilter/nf_conntrack_core.c if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE)) ct 609 net/netfilter/nf_conntrack_core.c destroy_gre_conntrack(ct); ct 617 net/netfilter/nf_conntrack_core.c nf_ct_remove_expectations(ct); ct 619 net/netfilter/nf_conntrack_core.c nf_ct_del_from_dying_or_unconfirmed_list(ct); ct 623 net/netfilter/nf_conntrack_core.c if (ct->master) ct 624 net/netfilter/nf_conntrack_core.c nf_ct_put(ct->master); ct 626 net/netfilter/nf_conntrack_core.c pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); ct 627 net/netfilter/nf_conntrack_core.c nf_conntrack_free(ct); ct 630 net/netfilter/nf_conntrack_core.c static void nf_ct_delete_from_lists(struct nf_conn *ct) ct 632 net/netfilter/nf_conntrack_core.c struct net *net = nf_ct_net(ct); ct 636 net/netfilter/nf_conntrack_core.c nf_ct_helper_destroy(ct); ct 642 net/netfilter/nf_conntrack_core.c &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); ct 644 net/netfilter/nf_conntrack_core.c &ct->tuplehash[IP_CT_DIR_REPLY].tuple); ct 647 net/netfilter/nf_conntrack_core.c clean_from_lists(ct); ct 650 net/netfilter/nf_conntrack_core.c nf_ct_add_to_dying_list(ct); ct 655 net/netfilter/nf_conntrack_core.c bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) ct 659 net/netfilter/nf_conntrack_core.c if (test_and_set_bit(IPS_DYING_BIT, &ct->status)) ct 662 net/netfilter/nf_conntrack_core.c tstamp = nf_conn_tstamp_find(ct); ct 666 net/netfilter/nf_conntrack_core.c if (nf_conntrack_event_report(IPCT_DESTROY, ct, ct 671 net/netfilter/nf_conntrack_core.c nf_ct_delete_from_lists(ct); ct 672 net/netfilter/nf_conntrack_core.c nf_conntrack_ecache_delayed_work(nf_ct_net(ct)); ct 676 net/netfilter/nf_conntrack_core.c nf_conntrack_ecache_work(nf_ct_net(ct)); ct 677 net/netfilter/nf_conntrack_core.c nf_ct_delete_from_lists(ct); ct 678 net/netfilter/nf_conntrack_core.c nf_ct_put(ct); ct 689 net/netfilter/nf_conntrack_core.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); ct 695 net/netfilter/nf_conntrack_core.c nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) && ct 696 net/netfilter/nf_conntrack_core.c nf_ct_is_confirmed(ct) && ct 697 net/netfilter/nf_conntrack_core.c net_eq(net, nf_ct_net(ct)); ct 713 net/netfilter/nf_conntrack_core.c static void nf_ct_gc_expired(struct nf_conn *ct) ct 715 net/netfilter/nf_conntrack_core.c if (!atomic_inc_not_zero(&ct->ct_general.use)) ct 718 net/netfilter/nf_conntrack_core.c if (nf_ct_should_gc(ct)) ct 719 net/netfilter/nf_conntrack_core.c nf_ct_kill(ct); ct 721 net/netfilter/nf_conntrack_core.c nf_ct_put(ct); ct 743 
net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 745 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); ct 746 net/netfilter/nf_conntrack_core.c if (nf_ct_is_expired(ct)) { ct 747 net/netfilter/nf_conntrack_core.c nf_ct_gc_expired(ct); ct 773 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 782 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); ct 783 net/netfilter/nf_conntrack_core.c if (likely(atomic_inc_not_zero(&ct->ct_general.use))) { ct 788 net/netfilter/nf_conntrack_core.c nf_ct_put(ct); ct 808 net/netfilter/nf_conntrack_core.c static void __nf_conntrack_hash_insert(struct nf_conn *ct, ct 812 net/netfilter/nf_conntrack_core.c hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, ct 814 net/netfilter/nf_conntrack_core.c hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, ct 819 net/netfilter/nf_conntrack_core.c nf_conntrack_hash_check_insert(struct nf_conn *ct) ct 822 net/netfilter/nf_conntrack_core.c struct net *net = nf_ct_net(ct); ct 828 net/netfilter/nf_conntrack_core.c zone = nf_ct_zone(ct); ct 834 net/netfilter/nf_conntrack_core.c &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); ct 836 net/netfilter/nf_conntrack_core.c &ct->tuplehash[IP_CT_DIR_REPLY].tuple); ct 841 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, ct 846 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, ct 852 net/netfilter/nf_conntrack_core.c atomic_set(&ct->ct_general.use, 2); ct 853 net/netfilter/nf_conntrack_core.c __nf_conntrack_hash_insert(ct, hash, reply_hash); ct 867 net/netfilter/nf_conntrack_core.c static inline void nf_ct_acct_update(struct nf_conn *ct, ct 873 net/netfilter/nf_conntrack_core.c acct = nf_conn_acct_find(ct); ct 882 net/netfilter/nf_conntrack_core.c static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 894 net/netfilter/nf_conntrack_core.c nf_ct_acct_update(ct, ctinfo, bytes); ct 904 net/netfilter/nf_conntrack_core.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); ct 909 net/netfilter/nf_conntrack_core.c l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); ct 911 net/netfilter/nf_conntrack_core.c !nf_ct_is_dying(ct) && ct 912 net/netfilter/nf_conntrack_core.c atomic_inc_not_zero(&ct->ct_general.use)) { ct 913 net/netfilter/nf_conntrack_core.c if (((ct->status & IPS_NAT_DONE_MASK) == 0) || ct 914 net/netfilter/nf_conntrack_core.c nf_ct_match(ct, loser_ct)) { ct 915 net/netfilter/nf_conntrack_core.c nf_ct_acct_merge(ct, ctinfo, loser_ct); ct 917 net/netfilter/nf_conntrack_core.c nf_ct_set(skb, ct, oldinfo); ct 920 net/netfilter/nf_conntrack_core.c nf_ct_put(ct); ct 933 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 942 net/netfilter/nf_conntrack_core.c ct = nf_ct_get(skb, &ctinfo); ct 943 net/netfilter/nf_conntrack_core.c net = nf_ct_net(ct); ct 952 net/netfilter/nf_conntrack_core.c zone = nf_ct_zone(ct); ct 958 net/netfilter/nf_conntrack_core.c hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; ct 961 net/netfilter/nf_conntrack_core.c &ct->tuplehash[IP_CT_DIR_REPLY].tuple); ct 975 net/netfilter/nf_conntrack_core.c if (unlikely(nf_ct_is_confirmed(ct))) { ct 982 net/netfilter/nf_conntrack_core.c pr_debug("Confirming conntrack %p\n", ct); ct 988 net/netfilter/nf_conntrack_core.c nf_ct_del_from_dying_or_unconfirmed_list(ct); ct 990 net/netfilter/nf_conntrack_core.c if (unlikely(nf_ct_is_dying(ct))) { ct 991 net/netfilter/nf_conntrack_core.c nf_ct_add_to_dying_list(ct); 
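Note: the netfilter hits in this part of the listing (nf_conntrack_bridge.c, flow_dissector.c, ip_vs_nfct.c, nf_conntrack_core.c) all follow one access pattern: the conntrack entry, if any, rides on the skb and is fetched together with its ctinfo. Below is a minimal sketch of that pattern; the helper name skb_has_usable_ct is hypothetical and only the accessors that actually appear in the entries above are used.

#include <net/netfilter/nf_conntrack.h>

/* Hedged sketch: decide whether an skb carries a live, confirmed conntrack
 * entry before acting on it, mirroring the nf_ct_get()/nf_ct_is_*() checks
 * seen in the nf_conntrack_bridge.c and nf_conntrack_core.c hits above. */
static bool skb_has_usable_ct(const struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);	/* NULL if untracked */

	if (!ct || nf_ct_is_template(ct))	/* templates come from ct rules, not traffic */
		return false;
	if (nf_ct_is_dying(ct))			/* entry already being torn down */
		return false;
	return nf_ct_is_confirmed(ct);		/* inserted into the main hash table */
}
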
ct 999 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, ct 1004 net/netfilter/nf_conntrack_core.c if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, ct 1011 net/netfilter/nf_conntrack_core.c ct->timeout += nfct_time_stamp; ct 1012 net/netfilter/nf_conntrack_core.c atomic_inc(&ct->ct_general.use); ct 1013 net/netfilter/nf_conntrack_core.c ct->status |= IPS_CONFIRMED; ct 1016 net/netfilter/nf_conntrack_core.c tstamp = nf_conn_tstamp_find(ct); ct 1025 net/netfilter/nf_conntrack_core.c __nf_conntrack_hash_insert(ct, hash, reply_hash); ct 1029 net/netfilter/nf_conntrack_core.c help = nfct_help(ct); ct 1031 net/netfilter/nf_conntrack_core.c nf_conntrack_event_cache(IPCT_HELPER, ct); ct 1033 net/netfilter/nf_conntrack_core.c nf_conntrack_event_cache(master_ct(ct) ? ct 1034 net/netfilter/nf_conntrack_core.c IPCT_RELATED : IPCT_NEW, ct); ct 1038 net/netfilter/nf_conntrack_core.c nf_ct_add_to_dying_list(ct); ct 1060 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 1070 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); ct 1072 net/netfilter/nf_conntrack_core.c if (ct == ignored_conntrack) ct 1075 net/netfilter/nf_conntrack_core.c if (nf_ct_is_expired(ct)) { ct 1076 net/netfilter/nf_conntrack_core.c nf_ct_gc_expired(ct); ct 1094 net/netfilter/nf_conntrack_core.c &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) ct 1190 net/netfilter/nf_conntrack_core.c static bool gc_worker_skip_ct(const struct nf_conn *ct) ct 1192 net/netfilter/nf_conntrack_core.c return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct); ct 1195 net/netfilter/nf_conntrack_core.c static bool gc_worker_can_early_drop(const struct nf_conn *ct) ct 1199 net/netfilter/nf_conntrack_core.c if (!test_bit(IPS_ASSURED_BIT, &ct->status)) ct 1202 net/netfilter/nf_conntrack_core.c l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); ct 1203 net/netfilter/nf_conntrack_core.c if (l4proto->can_early_drop && l4proto->can_early_drop(ct)) ct 1215 net/netfilter/nf_conntrack_core.c static void nf_ct_offload_timeout(struct nf_conn *ct) ct 1217 net/netfilter/nf_conntrack_core.c if (nf_ct_expires(ct) < DAY / 2) ct 1218 net/netfilter/nf_conntrack_core.c ct->timeout = nfct_time_stamp + DAY; ct 1272 net/netfilter/nf_conntrack_core.c if (atomic_read(&net->ct.count) < nf_conntrack_max95) ct 1351 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 1354 net/netfilter/nf_conntrack_core.c atomic_inc(&net->ct.count); ct 1357 net/netfilter/nf_conntrack_core.c unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { ct 1361 net/netfilter/nf_conntrack_core.c atomic_dec(&net->ct.count); ct 1371 net/netfilter/nf_conntrack_core.c ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); ct 1372 net/netfilter/nf_conntrack_core.c if (ct == NULL) ct 1375 net/netfilter/nf_conntrack_core.c spin_lock_init(&ct->lock); ct 1376 net/netfilter/nf_conntrack_core.c ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; ct 1377 net/netfilter/nf_conntrack_core.c ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; ct 1378 net/netfilter/nf_conntrack_core.c ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; ct 1380 net/netfilter/nf_conntrack_core.c *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; ct 1381 net/netfilter/nf_conntrack_core.c ct->status = 0; ct 1382 net/netfilter/nf_conntrack_core.c ct->timeout = 0; ct 1383 net/netfilter/nf_conntrack_core.c write_pnet(&ct->ct_net, net); ct 1384 net/netfilter/nf_conntrack_core.c memset(&ct->__nfct_init_offset, 0, ct 1388 net/netfilter/nf_conntrack_core.c 
nf_ct_zone_add(ct, zone); ct 1393 net/netfilter/nf_conntrack_core.c atomic_set(&ct->ct_general.use, 0); ct 1394 net/netfilter/nf_conntrack_core.c return ct; ct 1396 net/netfilter/nf_conntrack_core.c atomic_dec(&net->ct.count); ct 1410 net/netfilter/nf_conntrack_core.c void nf_conntrack_free(struct nf_conn *ct) ct 1412 net/netfilter/nf_conntrack_core.c struct net *net = nf_ct_net(ct); ct 1417 net/netfilter/nf_conntrack_core.c WARN_ON(atomic_read(&ct->ct_general.use) != 0); ct 1419 net/netfilter/nf_conntrack_core.c nf_ct_ext_destroy(ct); ct 1420 net/netfilter/nf_conntrack_core.c nf_ct_ext_free(ct); ct 1421 net/netfilter/nf_conntrack_core.c kmem_cache_free(nf_conntrack_cachep, ct); ct 1423 net/netfilter/nf_conntrack_core.c atomic_dec(&net->ct.count); ct 1436 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 1451 net/netfilter/nf_conntrack_core.c ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC, ct 1453 net/netfilter/nf_conntrack_core.c if (IS_ERR(ct)) ct 1454 net/netfilter/nf_conntrack_core.c return (struct nf_conntrack_tuple_hash *)ct; ct 1456 net/netfilter/nf_conntrack_core.c if (!nf_ct_add_synproxy(ct, tmpl)) { ct 1457 net/netfilter/nf_conntrack_core.c nf_conntrack_free(ct); ct 1464 net/netfilter/nf_conntrack_core.c nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout), ct 1467 net/netfilter/nf_conntrack_core.c nf_ct_acct_ext_add(ct, GFP_ATOMIC); ct 1468 net/netfilter/nf_conntrack_core.c nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); ct 1469 net/netfilter/nf_conntrack_core.c nf_ct_labels_ext_add(ct); ct 1472 net/netfilter/nf_conntrack_core.c nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, ct 1477 net/netfilter/nf_conntrack_core.c if (net->ct.expect_count) { ct 1482 net/netfilter/nf_conntrack_core.c ct, exp); ct 1484 net/netfilter/nf_conntrack_core.c __set_bit(IPS_EXPECTED_BIT, &ct->status); ct 1486 net/netfilter/nf_conntrack_core.c ct->master = exp->master; ct 1488 net/netfilter/nf_conntrack_core.c help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); ct 1494 net/netfilter/nf_conntrack_core.c ct->mark = exp->master->mark; ct 1497 net/netfilter/nf_conntrack_core.c ct->secmark = exp->master->secmark; ct 1504 net/netfilter/nf_conntrack_core.c __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); ct 1507 net/netfilter/nf_conntrack_core.c nf_conntrack_get(&ct->ct_general); ct 1508 net/netfilter/nf_conntrack_core.c nf_ct_add_to_unconfirmed_list(ct); ct 1514 net/netfilter/nf_conntrack_core.c exp->expectfn(ct, exp); ct 1518 net/netfilter/nf_conntrack_core.c return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; ct 1534 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 1556 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); ct 1563 net/netfilter/nf_conntrack_core.c if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { ct 1564 net/netfilter/nf_conntrack_core.c pr_debug("normal packet for %p\n", ct); ct 1566 net/netfilter/nf_conntrack_core.c } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { ct 1567 net/netfilter/nf_conntrack_core.c pr_debug("related packet for %p\n", ct); ct 1570 net/netfilter/nf_conntrack_core.c pr_debug("new packet for %p\n", ct); ct 1574 net/netfilter/nf_conntrack_core.c nf_ct_set(skb, ct, ctinfo); ct 1611 net/netfilter/nf_conntrack_core.c static int generic_packet(struct nf_conn *ct, struct sk_buff *skb, ct 1614 net/netfilter/nf_conntrack_core.c const unsigned int *timeout = nf_ct_timeout_lookup(ct); ct 1617 net/netfilter/nf_conntrack_core.c timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout; ct 1619 net/netfilter/nf_conntrack_core.c 
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); ct 1624 net/netfilter/nf_conntrack_core.c static int nf_conntrack_handle_packet(struct nf_conn *ct, ct 1630 net/netfilter/nf_conntrack_core.c switch (nf_ct_protonum(ct)) { ct 1632 net/netfilter/nf_conntrack_core.c return nf_conntrack_tcp_packet(ct, skb, dataoff, ct 1635 net/netfilter/nf_conntrack_core.c return nf_conntrack_udp_packet(ct, skb, dataoff, ct 1638 net/netfilter/nf_conntrack_core.c return nf_conntrack_icmp_packet(ct, skb, ctinfo, state); ct 1641 net/netfilter/nf_conntrack_core.c return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state); ct 1645 net/netfilter/nf_conntrack_core.c return nf_conntrack_udplite_packet(ct, skb, dataoff, ct 1650 net/netfilter/nf_conntrack_core.c return nf_conntrack_sctp_packet(ct, skb, dataoff, ct 1655 net/netfilter/nf_conntrack_core.c return nf_conntrack_dccp_packet(ct, skb, dataoff, ct 1660 net/netfilter/nf_conntrack_core.c return nf_conntrack_gre_packet(ct, skb, dataoff, ct 1665 net/netfilter/nf_conntrack_core.c return generic_packet(ct, skb, ctinfo); ct 1672 net/netfilter/nf_conntrack_core.c struct nf_conn *ct, *tmpl; ct 1718 net/netfilter/nf_conntrack_core.c ct = nf_ct_get(skb, &ctinfo); ct 1719 net/netfilter/nf_conntrack_core.c if (!ct) { ct 1726 net/netfilter/nf_conntrack_core.c ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state); ct 1731 net/netfilter/nf_conntrack_core.c nf_conntrack_put(&ct->ct_general); ct 1747 net/netfilter/nf_conntrack_core.c !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) ct 1748 net/netfilter/nf_conntrack_core.c nf_conntrack_event_cache(IPCT_REPLY, ct); ct 1759 net/netfilter/nf_conntrack_core.c void nf_conntrack_alter_reply(struct nf_conn *ct, ct 1762 net/netfilter/nf_conntrack_core.c struct nf_conn_help *help = nfct_help(ct); ct 1765 net/netfilter/nf_conntrack_core.c WARN_ON(nf_ct_is_confirmed(ct)); ct 1767 net/netfilter/nf_conntrack_core.c pr_debug("Altering reply tuple of %p to ", ct); ct 1770 net/netfilter/nf_conntrack_core.c ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; ct 1771 net/netfilter/nf_conntrack_core.c if (ct->master || (help && !hlist_empty(&help->expectations))) ct 1775 net/netfilter/nf_conntrack_core.c __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); ct 1781 net/netfilter/nf_conntrack_core.c void __nf_ct_refresh_acct(struct nf_conn *ct, ct 1788 net/netfilter/nf_conntrack_core.c if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) ct 1792 net/netfilter/nf_conntrack_core.c if (nf_ct_is_confirmed(ct)) ct 1795 net/netfilter/nf_conntrack_core.c if (READ_ONCE(ct->timeout) != extra_jiffies) ct 1796 net/netfilter/nf_conntrack_core.c WRITE_ONCE(ct->timeout, extra_jiffies); ct 1799 net/netfilter/nf_conntrack_core.c nf_ct_acct_update(ct, ctinfo, skb->len); ct 1803 net/netfilter/nf_conntrack_core.c bool nf_ct_kill_acct(struct nf_conn *ct, ct 1807 net/netfilter/nf_conntrack_core.c nf_ct_acct_update(ct, ctinfo, skb->len); ct 1809 net/netfilter/nf_conntrack_core.c return nf_ct_delete(ct, 0, 0); ct 1867 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 1871 net/netfilter/nf_conntrack_core.c ct = nf_ct_get(skb, &ctinfo); ct 1878 net/netfilter/nf_conntrack_core.c nf_ct_set(nskb, ct, ctinfo); ct 1883 net/netfilter/nf_conntrack_core.c struct nf_conn *ct, ct 1894 net/netfilter/nf_conntrack_core.c l3num = nf_ct_l3num(ct); ct 1904 net/netfilter/nf_conntrack_core.c if (ct->status & IPS_SRC_NAT) { ct 1906 net/netfilter/nf_conntrack_core.c ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all, ct 1909 net/netfilter/nf_conntrack_core.c 
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all; ct 1912 net/netfilter/nf_conntrack_core.c if (ct->status & IPS_DST_NAT) { ct 1914 net/netfilter/nf_conntrack_core.c ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all, ct 1917 net/netfilter/nf_conntrack_core.c ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all; ct 1920 net/netfilter/nf_conntrack_core.c h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple); ct 1927 net/netfilter/nf_conntrack_core.c status = ct->status; ct 1929 net/netfilter/nf_conntrack_core.c nf_ct_put(ct); ct 1930 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); ct 1931 net/netfilter/nf_conntrack_core.c nf_ct_set(skb, ct, ctinfo); ct 1938 net/netfilter/nf_conntrack_core.c nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC, ct 1943 net/netfilter/nf_conntrack_core.c nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST, ct 1953 net/netfilter/nf_conntrack_core.c static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct, ct 1960 net/netfilter/nf_conntrack_core.c help = nfct_help(ct); ct 1968 net/netfilter/nf_conntrack_core.c switch (nf_ct_l3num(ct)) { ct 1989 net/netfilter/nf_conntrack_core.c if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && ct 1991 net/netfilter/nf_conntrack_core.c if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { ct 1992 net/netfilter/nf_conntrack_core.c NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); ct 2004 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 2007 net/netfilter/nf_conntrack_core.c ct = nf_ct_get(skb, &ctinfo); ct 2008 net/netfilter/nf_conntrack_core.c if (!ct) ct 2011 net/netfilter/nf_conntrack_core.c if (!nf_ct_is_confirmed(ct)) { ct 2012 net/netfilter/nf_conntrack_core.c err = __nf_conntrack_update(net, skb, ct, ctinfo); ct 2017 net/netfilter/nf_conntrack_core.c return nf_confirm_cthelper(skb, ct, ctinfo); ct 2027 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 2029 net/netfilter/nf_conntrack_core.c ct = nf_ct_get(skb, &ctinfo); ct 2030 net/netfilter/nf_conntrack_core.c if (ct) { ct 2031 net/netfilter/nf_conntrack_core.c src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo)); ct 2047 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(hash); ct 2048 net/netfilter/nf_conntrack_core.c src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir); ct 2050 net/netfilter/nf_conntrack_core.c nf_ct_put(ct); ct 2061 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 2073 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); ct 2074 net/netfilter/nf_conntrack_core.c if (iter(ct, data)) ct 2085 net/netfilter/nf_conntrack_core.c atomic_inc(&ct->ct_general.use); ct 2088 net/netfilter/nf_conntrack_core.c return ct; ct 2095 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 2102 net/netfilter/nf_conntrack_core.c while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) { ct 2105 net/netfilter/nf_conntrack_core.c nf_ct_delete(ct, portid, report); ct 2106 net/netfilter/nf_conntrack_core.c nf_ct_put(ct); ct 2142 net/netfilter/nf_conntrack_core.c pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); ct 2146 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 2148 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); ct 2153 net/netfilter/nf_conntrack_core.c set_bit(IPS_DYING_BIT, &ct->status); ct 2164 net/netfilter/nf_conntrack_core.c if (atomic_read(&net->ct.count) > 0) { ct 2180 net/netfilter/nf_conntrack_core.c if (atomic_read(&net->ct.count) == 0) ct 2209 net/netfilter/nf_conntrack_core.c if (atomic_read(&net->ct.count) == 0) ct 2290 net/netfilter/nf_conntrack_core.c if 
(atomic_read(&net->ct.count) != 0) ct 2302 net/netfilter/nf_conntrack_core.c free_percpu(net->ct.stat); ct 2303 net/netfilter/nf_conntrack_core.c free_percpu(net->ct.pcpu_lists); ct 2335 net/netfilter/nf_conntrack_core.c struct nf_conn *ct; ct 2364 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h); ct 2366 net/netfilter/nf_conntrack_core.c bucket = __hash_conntrack(nf_ct_net(ct), ct 2578 net/netfilter/nf_conntrack_core.c atomic_set(&net->ct.count, 0); ct 2580 net/netfilter/nf_conntrack_core.c net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu); ct 2581 net/netfilter/nf_conntrack_core.c if (!net->ct.pcpu_lists) ct 2585 net/netfilter/nf_conntrack_core.c struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); ct 2592 net/netfilter/nf_conntrack_core.c net->ct.stat = alloc_percpu(struct ip_conntrack_stat); ct 2593 net/netfilter/nf_conntrack_core.c if (!net->ct.stat) ct 2609 net/netfilter/nf_conntrack_core.c free_percpu(net->ct.stat); ct 2611 net/netfilter/nf_conntrack_core.c free_percpu(net->ct.pcpu_lists); ct 51 net/netfilter/nf_conntrack_ecache.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); ct 54 net/netfilter/nf_conntrack_ecache.c if (!nf_ct_is_confirmed(ct)) ct 57 net/netfilter/nf_conntrack_ecache.c e = nf_ct_ecache_find(ct); ct 61 net/netfilter/nf_conntrack_ecache.c if (nf_conntrack_event(IPCT_DESTROY, ct)) { ct 67 net/netfilter/nf_conntrack_ecache.c refs[evicted] = ct; ct 120 net/netfilter/nf_conntrack_ecache.c int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct, ct 124 net/netfilter/nf_conntrack_ecache.c struct net *net = nf_ct_net(ct); ct 129 net/netfilter/nf_conntrack_ecache.c notify = rcu_dereference(net->ct.nf_conntrack_event_cb); ct 133 net/netfilter/nf_conntrack_ecache.c e = nf_ct_ecache_find(ct); ct 137 net/netfilter/nf_conntrack_ecache.c if (nf_ct_is_confirmed(ct)) { ct 139 net/netfilter/nf_conntrack_ecache.c .ct = ct, ct 151 net/netfilter/nf_conntrack_ecache.c spin_lock_bh(&ct->lock); ct 167 net/netfilter/nf_conntrack_ecache.c spin_unlock_bh(&ct->lock); ct 178 net/netfilter/nf_conntrack_ecache.c void nf_ct_deliver_cached_events(struct nf_conn *ct) ct 180 net/netfilter/nf_conntrack_ecache.c struct net *net = nf_ct_net(ct); ct 188 net/netfilter/nf_conntrack_ecache.c notify = rcu_dereference(net->ct.nf_conntrack_event_cb); ct 192 net/netfilter/nf_conntrack_ecache.c e = nf_ct_ecache_find(ct); ct 198 net/netfilter/nf_conntrack_ecache.c if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct)) ct 209 net/netfilter/nf_conntrack_ecache.c item.ct = ct; ct 218 net/netfilter/nf_conntrack_ecache.c spin_lock_bh(&ct->lock); ct 223 net/netfilter/nf_conntrack_ecache.c spin_unlock_bh(&ct->lock); ct 240 net/netfilter/nf_conntrack_ecache.c notify = rcu_dereference(net->ct.nf_expect_event_cb); ct 267 net/netfilter/nf_conntrack_ecache.c notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, ct 273 net/netfilter/nf_conntrack_ecache.c rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new); ct 288 net/netfilter/nf_conntrack_ecache.c notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, ct 291 net/netfilter/nf_conntrack_ecache.c RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); ct 304 net/netfilter/nf_conntrack_ecache.c notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, ct 310 net/netfilter/nf_conntrack_ecache.c rcu_assign_pointer(net->ct.nf_expect_event_cb, new); ct 325 net/netfilter/nf_conntrack_ecache.c notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, ct 328 net/netfilter/nf_conntrack_ecache.c 
RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); ct 345 net/netfilter/nf_conntrack_ecache.c net->ct.sysctl_events = nf_ct_events; ct 346 net/netfilter/nf_conntrack_ecache.c INIT_DELAYED_WORK(&net->ct.ecache_dwork, ecache_work); ct 351 net/netfilter/nf_conntrack_ecache.c cancel_delayed_work_sync(&net->ct.ecache_dwork); ct 57 net/netfilter/nf_conntrack_expect.c net->ct.expect_count--; ct 124 net/netfilter/nf_conntrack_expect.c if (!net->ct.expect_count) ct 164 net/netfilter/nf_conntrack_expect.c if (!net->ct.expect_count) ct 212 net/netfilter/nf_conntrack_expect.c void nf_ct_remove_expectations(struct nf_conn *ct) ct 214 net/netfilter/nf_conntrack_expect.c struct nf_conn_help *help = nfct_help(ct); ct 392 net/netfilter/nf_conntrack_expect.c net->ct.expect_count++; ct 461 net/netfilter/nf_conntrack_expect.c if (net->ct.expect_count >= nf_ct_expect_max) { ct 689 net/netfilter/nf_conntrack_expect.c net->ct.expect_count = 0; ct 20 net/netfilter/nf_conntrack_extend.c void nf_ct_ext_destroy(struct nf_conn *ct) ct 34 net/netfilter/nf_conntrack_extend.c t->destroy(ct); ct 40 net/netfilter/nf_conntrack_extend.c void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) ct 47 net/netfilter/nf_conntrack_extend.c WARN_ON(nf_ct_is_confirmed(ct)); ct 49 net/netfilter/nf_conntrack_extend.c old = ct->ext; ct 78 net/netfilter/nf_conntrack_extend.c ct->ext = new; ct 81 net/netfilter/nf_conntrack_extend.c rcu_assign_pointer(ct->ext, new); ct 348 net/netfilter/nf_conntrack_ftp.c static void update_nl_seq(struct nf_conn *ct, u32 nl_seq, ct 375 net/netfilter/nf_conntrack_ftp.c struct nf_conn *ct, ct 386 net/netfilter/nf_conntrack_ftp.c struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct); ct 442 net/netfilter/nf_conntrack_ftp.c cmd.l3num = nf_ct_l3num(ct); ct 443 net/netfilter/nf_conntrack_ftp.c memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, ct 462 net/netfilter/nf_conntrack_ftp.c nf_ct_helper_log(skb, ct, "partial matching of `%s'", ct 475 net/netfilter/nf_conntrack_ftp.c exp = nf_ct_expect_alloc(ct); ct 477 net/netfilter/nf_conntrack_ftp.c nf_ct_helper_log(skb, ct, "cannot alloc expectation"); ct 485 net/netfilter/nf_conntrack_ftp.c daddr = &ct->tuplehash[!dir].tuple.dst.u3; ct 488 net/netfilter/nf_conntrack_ftp.c if ((cmd.l3num == nf_ct_l3num(ct)) && ct 489 net/netfilter/nf_conntrack_ftp.c memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, ct 498 net/netfilter/nf_conntrack_ftp.c &ct->tuplehash[dir].tuple.src.u3.ip); ct 502 net/netfilter/nf_conntrack_ftp.c ct->tuplehash[dir].tuple.src.u3.ip6); ct 517 net/netfilter/nf_conntrack_ftp.c &ct->tuplehash[!dir].tuple.src.u3, daddr, ct 523 net/netfilter/nf_conntrack_ftp.c if (nf_nat_ftp && ct->status & IPS_NAT_MASK) ct 529 net/netfilter/nf_conntrack_ftp.c nf_ct_helper_log(skb, ct, "cannot add expectation"); ct 542 net/netfilter/nf_conntrack_ftp.c update_nl_seq(ct, seq, ct_ftp_info, dir, skb); ct 548 net/netfilter/nf_conntrack_ftp.c static int nf_ct_ftp_from_nlattr(struct nlattr *attr, struct nf_conn *ct) ct 550 net/netfilter/nf_conntrack_ftp.c struct nf_ct_ftp_master *ftp = nfct_help_data(ct); ct 64 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 69 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 74 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 83 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 90 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 97 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 104 net/netfilter/nf_conntrack_h323_main.c struct 
nf_conn *ct, ct 119 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 122 net/netfilter/nf_conntrack_h323_main.c struct nf_ct_h323_master *info = nfct_help_data(ct); ct 221 net/netfilter/nf_conntrack_h323_main.c static int get_h245_addr(struct nf_conn *ct, const unsigned char *data, ct 233 net/netfilter/nf_conntrack_h323_main.c if (nf_ct_l3num(ct) != AF_INET) ct 239 net/netfilter/nf_conntrack_h323_main.c if (nf_ct_l3num(ct) != AF_INET6) ct 255 net/netfilter/nf_conntrack_h323_main.c static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, ct 271 net/netfilter/nf_conntrack_h323_main.c if (!get_h245_addr(ct, *data, taddr, &addr, &port) || ct 272 net/netfilter/nf_conntrack_h323_main.c memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || ct 281 net/netfilter/nf_conntrack_h323_main.c if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) ct 283 net/netfilter/nf_conntrack_h323_main.c nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 284 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3, ct 285 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 289 net/netfilter/nf_conntrack_h323_main.c if ((rtcp_exp = nf_ct_expect_alloc(ct)) == NULL) { ct 293 net/netfilter/nf_conntrack_h323_main.c nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 294 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3, ct 295 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 298 net/netfilter/nf_conntrack_h323_main.c if (memcmp(&ct->tuplehash[dir].tuple.src.u3, ct 299 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 300 net/netfilter/nf_conntrack_h323_main.c sizeof(ct->tuplehash[dir].tuple.src.u3)) && ct 302 net/netfilter/nf_conntrack_h323_main.c nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 303 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 305 net/netfilter/nf_conntrack_h323_main.c ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff, ct 329 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 343 net/netfilter/nf_conntrack_h323_main.c if (!get_h245_addr(ct, *data, taddr, &addr, &port) || ct 344 net/netfilter/nf_conntrack_h323_main.c memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || ct 349 net/netfilter/nf_conntrack_h323_main.c if ((exp = nf_ct_expect_alloc(ct)) == NULL) ct 351 net/netfilter/nf_conntrack_h323_main.c nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 352 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3, ct 353 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 357 net/netfilter/nf_conntrack_h323_main.c if (memcmp(&ct->tuplehash[dir].tuple.src.u3, ct 358 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 359 net/netfilter/nf_conntrack_h323_main.c sizeof(ct->tuplehash[dir].tuple.src.u3)) && ct 361 net/netfilter/nf_conntrack_h323_main.c nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 362 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 364 net/netfilter/nf_conntrack_h323_main.c ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr, ct 380 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 390 net/netfilter/nf_conntrack_h323_main.c ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff, ct 399 net/netfilter/nf_conntrack_h323_main.c ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff, ct 408 
net/netfilter/nf_conntrack_h323_main.c static int process_olc(struct sk_buff *skb, struct nf_conn *ct, ct 421 net/netfilter/nf_conntrack_h323_main.c ret = process_h245_channel(skb, ct, ctinfo, ct 440 net/netfilter/nf_conntrack_h323_main.c process_h245_channel(skb, ct, ctinfo, ct 459 net/netfilter/nf_conntrack_h323_main.c ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff, ct 469 net/netfilter/nf_conntrack_h323_main.c static int process_olca(struct sk_buff *skb, struct nf_conn *ct, ct 487 net/netfilter/nf_conntrack_h323_main.c ret = process_h245_channel(skb, ct, ctinfo, ct 507 net/netfilter/nf_conntrack_h323_main.c ret = expect_rtp_rtcp(skb, ct, ctinfo, ct 517 net/netfilter/nf_conntrack_h323_main.c ret = expect_rtp_rtcp(skb, ct, ctinfo, ct 528 net/netfilter/nf_conntrack_h323_main.c ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff, ct 538 net/netfilter/nf_conntrack_h323_main.c static int process_h245(struct sk_buff *skb, struct nf_conn *ct, ct 547 net/netfilter/nf_conntrack_h323_main.c return process_olc(skb, ct, ctinfo, ct 557 net/netfilter/nf_conntrack_h323_main.c return process_olca(skb, ct, ctinfo, ct 574 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 591 net/netfilter/nf_conntrack_h323_main.c while (get_tpkt_data(skb, protoff, ct, ctinfo, ct 594 net/netfilter/nf_conntrack_h323_main.c nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); ct 608 net/netfilter/nf_conntrack_h323_main.c if (process_h245(skb, ct, ctinfo, protoff, ct 618 net/netfilter/nf_conntrack_h323_main.c nf_ct_helper_log(skb, ct, "cannot process H.245 message"); ct 636 net/netfilter/nf_conntrack_h323_main.c int get_h225_addr(struct nf_conn *ct, unsigned char *data, ct 645 net/netfilter/nf_conntrack_h323_main.c if (nf_ct_l3num(ct) != AF_INET) ct 651 net/netfilter/nf_conntrack_h323_main.c if (nf_ct_l3num(ct) != AF_INET6) ct 667 net/netfilter/nf_conntrack_h323_main.c static int expect_h245(struct sk_buff *skb, struct nf_conn *ct, ct 680 net/netfilter/nf_conntrack_h323_main.c if (!get_h225_addr(ct, *data, taddr, &addr, &port) || ct 681 net/netfilter/nf_conntrack_h323_main.c memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || ct 686 net/netfilter/nf_conntrack_h323_main.c if ((exp = nf_ct_expect_alloc(ct)) == NULL) ct 688 net/netfilter/nf_conntrack_h323_main.c nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 689 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3, ct 690 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 694 net/netfilter/nf_conntrack_h323_main.c if (memcmp(&ct->tuplehash[dir].tuple.src.u3, ct 695 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 696 net/netfilter/nf_conntrack_h323_main.c sizeof(ct->tuplehash[dir].tuple.src.u3)) && ct 698 net/netfilter/nf_conntrack_h323_main.c nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 699 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 701 net/netfilter/nf_conntrack_h323_main.c ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr, ct 781 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 792 net/netfilter/nf_conntrack_h323_main.c struct net *net = nf_ct_net(ct); ct 796 net/netfilter/nf_conntrack_h323_main.c if (!get_h225_addr(ct, *data, taddr, &addr, &port) || port == 0) ct 803 net/netfilter/nf_conntrack_h323_main.c callforward_do_filter(net, &addr, &ct->tuplehash[!dir].tuple.src.u3, ct 804 net/netfilter/nf_conntrack_h323_main.c nf_ct_l3num(ct))) { ct 810 
net/netfilter/nf_conntrack_h323_main.c if ((exp = nf_ct_expect_alloc(ct)) == NULL) ct 812 net/netfilter/nf_conntrack_h323_main.c nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 813 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3, &addr, ct 817 net/netfilter/nf_conntrack_h323_main.c if (memcmp(&ct->tuplehash[dir].tuple.src.u3, ct 818 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 819 net/netfilter/nf_conntrack_h323_main.c sizeof(ct->tuplehash[dir].tuple.src.u3)) && ct 821 net/netfilter/nf_conntrack_h323_main.c nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 822 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 824 net/netfilter/nf_conntrack_h323_main.c ret = nat_callforwarding(skb, ct, ctinfo, ct 840 net/netfilter/nf_conntrack_h323_main.c static int process_setup(struct sk_buff *skb, struct nf_conn *ct, ct 856 net/netfilter/nf_conntrack_h323_main.c ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff, ct 864 net/netfilter/nf_conntrack_h323_main.c (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 865 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK && ct 866 net/netfilter/nf_conntrack_h323_main.c get_h225_addr(ct, *data, &setup->destCallSignalAddress, ct 868 net/netfilter/nf_conntrack_h323_main.c memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) { ct 870 net/netfilter/nf_conntrack_h323_main.c &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3, ct 871 net/netfilter/nf_conntrack_h323_main.c ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); ct 874 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3, ct 875 net/netfilter/nf_conntrack_h323_main.c ct->tuplehash[!dir].tuple.src.u.tcp.port); ct 881 net/netfilter/nf_conntrack_h323_main.c (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 882 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK && ct 883 net/netfilter/nf_conntrack_h323_main.c get_h225_addr(ct, *data, &setup->sourceCallSignalAddress, ct 885 net/netfilter/nf_conntrack_h323_main.c memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) { ct 887 net/netfilter/nf_conntrack_h323_main.c &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3, ct 888 net/netfilter/nf_conntrack_h323_main.c ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); ct 891 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 892 net/netfilter/nf_conntrack_h323_main.c ct->tuplehash[!dir].tuple.dst.u.tcp.port); ct 899 net/netfilter/nf_conntrack_h323_main.c ret = process_olc(skb, ct, ctinfo, ct 911 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, ct 923 net/netfilter/nf_conntrack_h323_main.c ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff, ct 931 net/netfilter/nf_conntrack_h323_main.c ret = process_olc(skb, ct, ctinfo, ct 942 net/netfilter/nf_conntrack_h323_main.c static int process_connect(struct sk_buff *skb, struct nf_conn *ct, ct 954 net/netfilter/nf_conntrack_h323_main.c ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff, ct 962 net/netfilter/nf_conntrack_h323_main.c ret = process_olc(skb, ct, ctinfo, ct 973 net/netfilter/nf_conntrack_h323_main.c static int process_alerting(struct sk_buff *skb, struct nf_conn *ct, ct 985 net/netfilter/nf_conntrack_h323_main.c ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff, ct 993 net/netfilter/nf_conntrack_h323_main.c ret = process_olc(skb, ct, ctinfo, ct 1004 net/netfilter/nf_conntrack_h323_main.c static int process_facility(struct sk_buff *skb, 
struct nf_conn *ct, ct 1017 net/netfilter/nf_conntrack_h323_main.c return expect_callforwarding(skb, ct, ctinfo, ct 1025 net/netfilter/nf_conntrack_h323_main.c ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff, ct 1033 net/netfilter/nf_conntrack_h323_main.c ret = process_olc(skb, ct, ctinfo, ct 1044 net/netfilter/nf_conntrack_h323_main.c static int process_progress(struct sk_buff *skb, struct nf_conn *ct, ct 1056 net/netfilter/nf_conntrack_h323_main.c ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff, ct 1064 net/netfilter/nf_conntrack_h323_main.c ret = process_olc(skb, ct, ctinfo, ct 1075 net/netfilter/nf_conntrack_h323_main.c static int process_q931(struct sk_buff *skb, struct nf_conn *ct, ct 1086 net/netfilter/nf_conntrack_h323_main.c ret = process_setup(skb, ct, ctinfo, protoff, data, dataoff, ct 1090 net/netfilter/nf_conntrack_h323_main.c ret = process_callproceeding(skb, ct, ctinfo, ct 1096 net/netfilter/nf_conntrack_h323_main.c ret = process_connect(skb, ct, ctinfo, protoff, data, dataoff, ct 1100 net/netfilter/nf_conntrack_h323_main.c ret = process_alerting(skb, ct, ctinfo, protoff, data, dataoff, ct 1104 net/netfilter/nf_conntrack_h323_main.c ret = process_facility(skb, ct, ctinfo, protoff, data, dataoff, ct 1108 net/netfilter/nf_conntrack_h323_main.c ret = process_progress(skb, ct, ctinfo, protoff, data, dataoff, ct 1122 net/netfilter/nf_conntrack_h323_main.c ret = process_h245(skb, ct, ctinfo, ct 1134 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 1151 net/netfilter/nf_conntrack_h323_main.c while (get_tpkt_data(skb, protoff, ct, ctinfo, ct 1154 net/netfilter/nf_conntrack_h323_main.c nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); ct 1167 net/netfilter/nf_conntrack_h323_main.c if (process_q931(skb, ct, ctinfo, protoff, ct 1177 net/netfilter/nf_conntrack_h323_main.c nf_ct_helper_log(skb, ct, "cannot process Q.931 message"); ct 1225 net/netfilter/nf_conntrack_h323_main.c static struct nf_conntrack_expect *find_expect(struct nf_conn *ct, ct 1229 net/netfilter/nf_conntrack_h323_main.c struct net *net = nf_ct_net(ct); ct 1239 net/netfilter/nf_conntrack_h323_main.c exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); ct 1240 net/netfilter/nf_conntrack_h323_main.c if (exp && exp->master == ct) ct 1245 net/netfilter/nf_conntrack_h323_main.c static int expect_q931(struct sk_buff *skb, struct nf_conn *ct, ct 1250 net/netfilter/nf_conntrack_h323_main.c struct nf_ct_h323_master *info = nfct_help_data(ct); ct 1261 net/netfilter/nf_conntrack_h323_main.c if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && ct 1262 net/netfilter/nf_conntrack_h323_main.c memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, ct 1271 net/netfilter/nf_conntrack_h323_main.c if ((exp = nf_ct_expect_alloc(ct)) == NULL) ct 1273 net/netfilter/nf_conntrack_h323_main.c nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 1275 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3 : NULL, ct 1276 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 1282 net/netfilter/nf_conntrack_h323_main.c if (nat_q931 && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1283 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { /* Need NAT */ ct 1284 net/netfilter/nf_conntrack_h323_main.c ret = nat_q931(skb, ct, ctinfo, protoff, data, ct 1302 net/netfilter/nf_conntrack_h323_main.c static int process_grq(struct sk_buff *skb, struct nf_conn *ct, ct 1312 net/netfilter/nf_conntrack_h323_main.c if 
(set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1313 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) /* NATed */ ct 1314 net/netfilter/nf_conntrack_h323_main.c return set_ras_addr(skb, ct, ctinfo, protoff, data, ct 1319 net/netfilter/nf_conntrack_h323_main.c static int process_gcf(struct sk_buff *skb, struct nf_conn *ct, ct 1332 net/netfilter/nf_conntrack_h323_main.c if (!get_h225_addr(ct, *data, &gcf->rasAddress, &addr, &port)) ct 1336 net/netfilter/nf_conntrack_h323_main.c if (!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && ct 1337 net/netfilter/nf_conntrack_h323_main.c port == ct->tuplehash[dir].tuple.src.u.udp.port) ct 1341 net/netfilter/nf_conntrack_h323_main.c if (test_bit(IPS_EXPECTED_BIT, &ct->status)) ct 1345 net/netfilter/nf_conntrack_h323_main.c if ((exp = nf_ct_expect_alloc(ct)) == NULL) ct 1347 net/netfilter/nf_conntrack_h323_main.c nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 1348 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3, &addr, ct 1363 net/netfilter/nf_conntrack_h323_main.c static int process_rrq(struct sk_buff *skb, struct nf_conn *ct, ct 1368 net/netfilter/nf_conntrack_h323_main.c struct nf_ct_h323_master *info = nfct_help_data(ct); ct 1374 net/netfilter/nf_conntrack_h323_main.c ret = expect_q931(skb, ct, ctinfo, protoff, data, ct 1381 net/netfilter/nf_conntrack_h323_main.c if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1382 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 1383 net/netfilter/nf_conntrack_h323_main.c ret = set_ras_addr(skb, ct, ctinfo, protoff, data, ct 1399 net/netfilter/nf_conntrack_h323_main.c static int process_rcf(struct sk_buff *skb, struct nf_conn *ct, ct 1404 net/netfilter/nf_conntrack_h323_main.c struct nf_ct_h323_master *info = nfct_help_data(ct); ct 1413 net/netfilter/nf_conntrack_h323_main.c if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1414 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 1415 net/netfilter/nf_conntrack_h323_main.c ret = set_sig_addr(skb, ct, ctinfo, protoff, data, ct 1430 net/netfilter/nf_conntrack_h323_main.c nf_ct_refresh(ct, skb, info->timeout * HZ); ct 1434 net/netfilter/nf_conntrack_h323_main.c exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3, ct 1450 net/netfilter/nf_conntrack_h323_main.c static int process_urq(struct sk_buff *skb, struct nf_conn *ct, ct 1455 net/netfilter/nf_conntrack_h323_main.c struct nf_ct_h323_master *info = nfct_help_data(ct); ct 1463 net/netfilter/nf_conntrack_h323_main.c if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1464 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 1465 net/netfilter/nf_conntrack_h323_main.c ret = set_sig_addr(skb, ct, ctinfo, protoff, data, ct 1473 net/netfilter/nf_conntrack_h323_main.c nf_ct_remove_expectations(ct); ct 1478 net/netfilter/nf_conntrack_h323_main.c nf_ct_refresh(ct, skb, 30 * HZ); ct 1483 net/netfilter/nf_conntrack_h323_main.c static int process_arq(struct sk_buff *skb, struct nf_conn *ct, ct 1488 net/netfilter/nf_conntrack_h323_main.c const struct nf_ct_h323_master *info = nfct_help_data(ct); ct 1498 net/netfilter/nf_conntrack_h323_main.c get_h225_addr(ct, *data, &arq->destCallSignalAddress, ct 1500 net/netfilter/nf_conntrack_h323_main.c !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && ct 1502 net/netfilter/nf_conntrack_h323_main.c nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1503 net/netfilter/nf_conntrack_h323_main.c set_h225_addr && 
ct->status & IPS_NAT_MASK) { ct 1507 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 1512 net/netfilter/nf_conntrack_h323_main.c get_h225_addr(ct, *data, &arq->srcCallSignalAddress, ct 1514 net/netfilter/nf_conntrack_h323_main.c !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && ct 1515 net/netfilter/nf_conntrack_h323_main.c set_h225_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1516 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 1520 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.dst.u3, ct 1527 net/netfilter/nf_conntrack_h323_main.c static int process_acf(struct sk_buff *skb, struct nf_conn *ct, ct 1541 net/netfilter/nf_conntrack_h323_main.c if (!get_h225_addr(ct, *data, &acf->destCallSignalAddress, ct 1545 net/netfilter/nf_conntrack_h323_main.c if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) { ct 1548 net/netfilter/nf_conntrack_h323_main.c if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1549 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) ct 1550 net/netfilter/nf_conntrack_h323_main.c return set_sig_addr(skb, ct, ctinfo, protoff, data, ct 1556 net/netfilter/nf_conntrack_h323_main.c if ((exp = nf_ct_expect_alloc(ct)) == NULL) ct 1558 net/netfilter/nf_conntrack_h323_main.c nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 1559 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3, &addr, ct 1575 net/netfilter/nf_conntrack_h323_main.c static int process_lrq(struct sk_buff *skb, struct nf_conn *ct, ct 1585 net/netfilter/nf_conntrack_h323_main.c if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1586 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) ct 1587 net/netfilter/nf_conntrack_h323_main.c return set_ras_addr(skb, ct, ctinfo, protoff, data, ct 1592 net/netfilter/nf_conntrack_h323_main.c static int process_lcf(struct sk_buff *skb, struct nf_conn *ct, ct 1605 net/netfilter/nf_conntrack_h323_main.c if (!get_h225_addr(ct, *data, &lcf->callSignalAddress, ct 1610 net/netfilter/nf_conntrack_h323_main.c if ((exp = nf_ct_expect_alloc(ct)) == NULL) ct 1612 net/netfilter/nf_conntrack_h323_main.c nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 1613 net/netfilter/nf_conntrack_h323_main.c &ct->tuplehash[!dir].tuple.src.u3, &addr, ct 1631 net/netfilter/nf_conntrack_h323_main.c static int process_irr(struct sk_buff *skb, struct nf_conn *ct, ct 1643 net/netfilter/nf_conntrack_h323_main.c if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1644 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 1645 net/netfilter/nf_conntrack_h323_main.c ret = set_ras_addr(skb, ct, ctinfo, protoff, data, ct 1652 net/netfilter/nf_conntrack_h323_main.c if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && ct 1653 net/netfilter/nf_conntrack_h323_main.c ct->status & IPS_NAT_MASK) { ct 1654 net/netfilter/nf_conntrack_h323_main.c ret = set_sig_addr(skb, ct, ctinfo, protoff, data, ct 1664 net/netfilter/nf_conntrack_h323_main.c static int process_ras(struct sk_buff *skb, struct nf_conn *ct, ct 1671 net/netfilter/nf_conntrack_h323_main.c return process_grq(skb, ct, ctinfo, protoff, data, ct 1674 net/netfilter/nf_conntrack_h323_main.c return process_gcf(skb, ct, ctinfo, protoff, data, ct 1677 net/netfilter/nf_conntrack_h323_main.c return process_rrq(skb, ct, ctinfo, protoff, data, ct 1680 net/netfilter/nf_conntrack_h323_main.c return process_rcf(skb, ct, ctinfo, protoff, data, ct 1683 
net/netfilter/nf_conntrack_h323_main.c return process_urq(skb, ct, ctinfo, protoff, data, ct 1686 net/netfilter/nf_conntrack_h323_main.c return process_arq(skb, ct, ctinfo, protoff, data, ct 1689 net/netfilter/nf_conntrack_h323_main.c return process_acf(skb, ct, ctinfo, protoff, data, ct 1692 net/netfilter/nf_conntrack_h323_main.c return process_lrq(skb, ct, ctinfo, protoff, data, ct 1695 net/netfilter/nf_conntrack_h323_main.c return process_lcf(skb, ct, ctinfo, protoff, data, ct 1698 net/netfilter/nf_conntrack_h323_main.c return process_irr(skb, ct, ctinfo, protoff, data, ct 1709 net/netfilter/nf_conntrack_h323_main.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 1725 net/netfilter/nf_conntrack_h323_main.c nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); ct 1737 net/netfilter/nf_conntrack_h323_main.c if (process_ras(skb, ct, ctinfo, protoff, &data, &ras) < 0) ct 1746 net/netfilter/nf_conntrack_h323_main.c nf_ct_helper_log(skb, ct, "cannot process RAS message"); ct 199 net/netfilter/nf_conntrack_helper.c nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) ct 203 net/netfilter/nf_conntrack_helper.c help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp); ct 213 net/netfilter/nf_conntrack_helper.c nf_ct_lookup_helper(struct nf_conn *ct, struct net *net) ct 215 net/netfilter/nf_conntrack_helper.c if (!net->ct.sysctl_auto_assign_helper) { ct 216 net/netfilter/nf_conntrack_helper.c if (net->ct.auto_assign_helper_warned) ct 218 net/netfilter/nf_conntrack_helper.c if (!__nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple)) ct 224 net/netfilter/nf_conntrack_helper.c net->ct.auto_assign_helper_warned = 1; ct 228 net/netfilter/nf_conntrack_helper.c return __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); ct 232 net/netfilter/nf_conntrack_helper.c int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, ct 237 net/netfilter/nf_conntrack_helper.c struct net *net = nf_ct_net(ct); ct 245 net/netfilter/nf_conntrack_helper.c if (test_bit(IPS_HELPER_BIT, &ct->status)) ct 252 net/netfilter/nf_conntrack_helper.c set_bit(IPS_HELPER_BIT, &ct->status); ct 256 net/netfilter/nf_conntrack_helper.c help = nfct_help(ct); ct 259 net/netfilter/nf_conntrack_helper.c helper = nf_ct_lookup_helper(ct, net); ct 268 net/netfilter/nf_conntrack_helper.c help = nf_ct_helper_ext_add(ct, flags); ct 290 net/netfilter/nf_conntrack_helper.c static int unhelp(struct nf_conn *ct, void *me) ct 292 net/netfilter/nf_conntrack_helper.c struct nf_conn_help *help = nfct_help(ct); ct 295 net/netfilter/nf_conntrack_helper.c nf_conntrack_event(IPCT_HELPER, ct); ct 303 net/netfilter/nf_conntrack_helper.c void nf_ct_helper_destroy(struct nf_conn *ct) ct 305 net/netfilter/nf_conntrack_helper.c struct nf_conn_help *help = nfct_help(ct); ct 312 net/netfilter/nf_conntrack_helper.c helper->destroy(ct); ct 370 net/netfilter/nf_conntrack_helper.c void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, ct 384 net/netfilter/nf_conntrack_helper.c help = nfct_help(ct); ct 389 net/netfilter/nf_conntrack_helper.c nf_log_packet(nf_ct_net(ct), nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, ct 484 net/netfilter/nf_conntrack_helper.c struct nf_conn *ct, ct 487 net/netfilter/nf_conntrack_helper.c struct nf_conn *ct), ct 560 net/netfilter/nf_conntrack_helper.c net->ct.auto_assign_helper_warned = false; ct 561 net/netfilter/nf_conntrack_helper.c net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper; ct 107 net/netfilter/nf_conntrack_irc.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 187 
net/netfilter/nf_conntrack_irc.c tuple = &ct->tuplehash[dir].tuple; ct 196 net/netfilter/nf_conntrack_irc.c exp = nf_ct_expect_alloc(ct); ct 198 net/netfilter/nf_conntrack_irc.c nf_ct_helper_log(skb, ct, ct 203 net/netfilter/nf_conntrack_irc.c tuple = &ct->tuplehash[!dir].tuple; ct 211 net/netfilter/nf_conntrack_irc.c if (nf_nat_irc && ct->status & IPS_NAT_MASK) ct 217 net/netfilter/nf_conntrack_irc.c nf_ct_helper_log(skb, ct, ct 30 net/netfilter/nf_conntrack_labels.c int nf_connlabels_replace(struct nf_conn *ct, ct 39 net/netfilter/nf_conntrack_labels.c labels = nf_ct_labels_find(ct); ct 56 net/netfilter/nf_conntrack_labels.c nf_conntrack_event_cache(IPCT_LABEL, ct); ct 67 net/netfilter/nf_conntrack_labels.c net->ct.labels_used++; ct 77 net/netfilter/nf_conntrack_labels.c net->ct.labels_used--; ct 40 net/netfilter/nf_conntrack_netbios_ns.c struct nf_conn *ct, ct 43 net/netfilter/nf_conntrack_netbios_ns.c return nf_conntrack_broadcast_help(skb, ct, ctinfo, timeout); ct 158 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) ct 160 net/netfilter/nf_conntrack_netlink.c if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status))) ct 168 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) ct 170 net/netfilter/nf_conntrack_netlink.c long timeout = nf_ct_expires(ct) / HZ; ct 180 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct) ct 186 net/netfilter/nf_conntrack_netlink.c l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); ct 194 net/netfilter/nf_conntrack_netlink.c ret = l4proto->to_nlattr(skb, nest_proto, ct); ct 205 net/netfilter/nf_conntrack_netlink.c const struct nf_conn *ct) ct 208 net/netfilter/nf_conntrack_netlink.c const struct nf_conn_help *help = nfct_help(ct); ct 225 net/netfilter/nf_conntrack_netlink.c helper->to_nlattr(skb, ct); ct 271 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type) ct 273 net/netfilter/nf_conntrack_netlink.c struct nf_conn_acct *acct = nf_conn_acct_find(ct); ct 287 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) ct 292 net/netfilter/nf_conntrack_netlink.c tstamp = nf_conn_tstamp_find(ct); ct 315 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) ct 317 net/netfilter/nf_conntrack_netlink.c if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark))) ct 329 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) ct 335 net/netfilter/nf_conntrack_netlink.c ret = security_secid_to_secctx(ct->secmark, &secctx, &len); ct 358 net/netfilter/nf_conntrack_netlink.c static inline int ctnetlink_label_size(const struct nf_conn *ct) ct 360 net/netfilter/nf_conntrack_netlink.c struct nf_conn_labels *labels = nf_ct_labels_find(ct); ct 368 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct) ct 370 net/netfilter/nf_conntrack_netlink.c struct nf_conn_labels *labels = nf_ct_labels_find(ct); ct 391 net/netfilter/nf_conntrack_netlink.c #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) ct 393 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct) ct 397 net/netfilter/nf_conntrack_netlink.c if (!(ct->status & IPS_EXPECTED)) ct 403 
net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0) ct 438 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct) ct 440 net/netfilter/nf_conntrack_netlink.c struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); ct 443 net/netfilter/nf_conntrack_netlink.c if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj) ct 446 net/netfilter/nf_conntrack_netlink.c spin_lock_bh(&ct->lock); ct 455 net/netfilter/nf_conntrack_netlink.c spin_unlock_bh(&ct->lock); ct 458 net/netfilter/nf_conntrack_netlink.c spin_unlock_bh(&ct->lock); ct 462 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct) ct 464 net/netfilter/nf_conntrack_netlink.c struct nf_conn_synproxy *synproxy = nfct_synproxy(ct); ct 487 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) ct 489 net/netfilter/nf_conntrack_netlink.c __be32 id = (__force __be32)nf_ct_get_id(ct); ct 499 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) ct 501 net/netfilter/nf_conntrack_netlink.c if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)))) ct 511 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct) ct 525 net/netfilter/nf_conntrack_netlink.c nfmsg->nfgen_family = nf_ct_l3num(ct); ct 529 net/netfilter/nf_conntrack_netlink.c zone = nf_ct_zone(ct); ct 534 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) ct 544 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) ct 555 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_status(skb, ct) < 0 || ct 556 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_acct(skb, ct, type) < 0 || ct 557 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_timestamp(skb, ct) < 0 || ct 558 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_helpinfo(skb, ct) < 0 || ct 559 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_mark(skb, ct) < 0 || ct 560 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_secctx(skb, ct) < 0 || ct 561 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_labels(skb, ct) < 0 || ct 562 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_id(skb, ct) < 0 || ct 563 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_use(skb, ct) < 0 || ct 564 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_master(skb, ct) < 0 || ct 565 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_ct_seq_adj(skb, ct) < 0 || ct 566 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_ct_synproxy(skb, ct) < 0) ct 569 net/netfilter/nf_conntrack_netlink.c if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) && ct 570 net/netfilter/nf_conntrack_netlink.c (ctnetlink_dump_timeout(skb, ct) < 0 || ct 571 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_protoinfo(skb, ct) < 0)) ct 591 net/netfilter/nf_conntrack_netlink.c static size_t ctnetlink_proto_size(const struct nf_conn *ct) ct 599 net/netfilter/nf_conntrack_netlink.c l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); ct 610 net/netfilter/nf_conntrack_netlink.c static inline size_t ctnetlink_acct_size(const struct nf_conn *ct) ct 612 net/netfilter/nf_conntrack_netlink.c if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT)) ct 620 net/netfilter/nf_conntrack_netlink.c static inline int ctnetlink_secctx_size(const struct nf_conn *ct) ct 625 net/netfilter/nf_conntrack_netlink.c ret = 
security_secid_to_secctx(ct->secmark, NULL, &len); ct 636 net/netfilter/nf_conntrack_netlink.c static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct) ct 639 net/netfilter/nf_conntrack_netlink.c if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP)) ct 648 net/netfilter/nf_conntrack_netlink.c static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct) ct 657 net/netfilter/nf_conntrack_netlink.c + ctnetlink_acct_size(ct) ct 658 net/netfilter/nf_conntrack_netlink.c + ctnetlink_timestamp_size(ct) ct 663 net/netfilter/nf_conntrack_netlink.c + ctnetlink_secctx_size(ct) ct 674 net/netfilter/nf_conntrack_netlink.c + ctnetlink_proto_size(ct) ct 675 net/netfilter/nf_conntrack_netlink.c + ctnetlink_label_size(ct) ct 687 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct = item->ct; ct 706 net/netfilter/nf_conntrack_netlink.c net = nf_ct_net(ct); ct 710 net/netfilter/nf_conntrack_netlink.c skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC); ct 720 net/netfilter/nf_conntrack_netlink.c nfmsg->nfgen_family = nf_ct_l3num(ct); ct 724 net/netfilter/nf_conntrack_netlink.c zone = nf_ct_zone(ct); ct 729 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) ct 739 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) ct 750 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_id(skb, ct) < 0) ct 753 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_status(skb, ct) < 0) ct 757 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_acct(skb, ct, type) < 0 || ct 758 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_timestamp(skb, ct) < 0) ct 761 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_timeout(skb, ct) < 0) ct 765 net/netfilter/nf_conntrack_netlink.c && ctnetlink_dump_protoinfo(skb, ct) < 0) ct 768 net/netfilter/nf_conntrack_netlink.c if ((events & (1 << IPCT_HELPER) || nfct_help(ct)) ct 769 net/netfilter/nf_conntrack_netlink.c && ctnetlink_dump_helpinfo(skb, ct) < 0) ct 773 net/netfilter/nf_conntrack_netlink.c if ((events & (1 << IPCT_SECMARK) || ct->secmark) ct 774 net/netfilter/nf_conntrack_netlink.c && ctnetlink_dump_secctx(skb, ct) < 0) ct 778 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_labels(skb, ct) < 0) ct 782 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_master(skb, ct) < 0) ct 786 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_ct_seq_adj(skb, ct) < 0) ct 790 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_ct_synproxy(skb, ct) < 0) ct 795 net/netfilter/nf_conntrack_netlink.c if ((events & (1 << IPCT_MARK) || ct->mark) ct 796 net/netfilter/nf_conntrack_netlink.c && ctnetlink_dump_mark(skb, ct) < 0) ct 877 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_filter_match(struct nf_conn *ct, void *data) ct 888 net/netfilter/nf_conntrack_netlink.c if (filter->family && nf_ct_l3num(ct) != filter->family) ct 892 net/netfilter/nf_conntrack_netlink.c if ((ct->mark & filter->mark.mask) != filter->mark.val) ct 907 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct, *last; ct 937 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); ct 938 net/netfilter/nf_conntrack_netlink.c if (nf_ct_is_expired(ct)) { ct 940 net/netfilter/nf_conntrack_netlink.c atomic_inc_not_zero(&ct->ct_general.use)) ct 941 net/netfilter/nf_conntrack_netlink.c nf_ct_evict[i++] = ct; ct 945 net/netfilter/nf_conntrack_netlink.c if (!net_eq(net, nf_ct_net(ct))) ct 949 net/netfilter/nf_conntrack_netlink.c if (ct != last) ct 953 
net/netfilter/nf_conntrack_netlink.c if (!ctnetlink_filter_match(ct, cb->data)) ct 961 net/netfilter/nf_conntrack_netlink.c ct); ct 964 net/netfilter/nf_conntrack_netlink.c nf_conntrack_get(&ct->ct_general); ct 965 net/netfilter/nf_conntrack_netlink.c cb->args[1] = (unsigned long)ct; ct 1225 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data) ct 1227 net/netfilter/nf_conntrack_netlink.c if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) ct 1230 net/netfilter/nf_conntrack_netlink.c return ctnetlink_filter_match(ct, data); ct 1260 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct; ct 1290 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); ct 1292 net/netfilter/nf_conntrack_netlink.c if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) { ct 1293 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 1300 net/netfilter/nf_conntrack_netlink.c if (id != (__force __be32)nf_ct_get_id(ct)) { ct 1301 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 1306 net/netfilter/nf_conntrack_netlink.c nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh)); ct 1307 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 1320 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct; ct 1358 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); ct 1363 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 1369 net/netfilter/nf_conntrack_netlink.c NFNL_MSG_TYPE(nlh->nlmsg_type), ct); ct 1371 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 1398 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct, *last; ct 1419 net/netfilter/nf_conntrack_netlink.c pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); ct 1424 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); ct 1425 net/netfilter/nf_conntrack_netlink.c if (l3proto && nf_ct_l3num(ct) != l3proto) ct 1428 net/netfilter/nf_conntrack_netlink.c if (ct != last) ct 1436 net/netfilter/nf_conntrack_netlink.c ct); ct 1439 net/netfilter/nf_conntrack_netlink.c if (!atomic_inc_not_zero(&ct->ct_general.use)) ct 1442 net/netfilter/nf_conntrack_netlink.c cb->args[1] = (unsigned long)ct; ct 1509 net/netfilter/nf_conntrack_netlink.c ctnetlink_parse_nat_setup(struct nf_conn *ct, ct 1535 net/netfilter/nf_conntrack_netlink.c err = nat_hook->parse_nat_setup(ct, manip, attr); ct 1540 net/netfilter/nf_conntrack_netlink.c if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) { ct 1556 net/netfilter/nf_conntrack_netlink.c __ctnetlink_change_status(struct nf_conn *ct, unsigned long on, ct 1567 net/netfilter/nf_conntrack_netlink.c set_bit(bit, &ct->status); ct 1569 net/netfilter/nf_conntrack_netlink.c clear_bit(bit, &ct->status); ct 1574 net/netfilter/nf_conntrack_netlink.c ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[]) ct 1578 net/netfilter/nf_conntrack_netlink.c d = ct->status ^ status; ct 1592 net/netfilter/nf_conntrack_netlink.c __ctnetlink_change_status(ct, status, 0); ct 1597 net/netfilter/nf_conntrack_netlink.c ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[]) ct 1605 net/netfilter/nf_conntrack_netlink.c ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST, ct 1610 net/netfilter/nf_conntrack_netlink.c return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC, ct 1619 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_change_helper(struct nf_conn *ct, ct 1623 net/netfilter/nf_conntrack_netlink.c struct nf_conn_help *help = nfct_help(ct); ct 1633 net/netfilter/nf_conntrack_netlink.c if (ct->master) { ct 1653 
net/netfilter/nf_conntrack_netlink.c nf_ct_remove_expectations(ct); ct 1661 net/netfilter/nf_conntrack_netlink.c helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), ct 1662 net/netfilter/nf_conntrack_netlink.c nf_ct_protonum(ct)); ct 1672 net/netfilter/nf_conntrack_netlink.c helper->from_nlattr(helpinfo, ct); ct 1685 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_change_timeout(struct nf_conn *ct, ct 1692 net/netfilter/nf_conntrack_netlink.c ct->timeout = nfct_time_stamp + (u32)timeout; ct 1694 net/netfilter/nf_conntrack_netlink.c if (test_bit(IPS_DYING_BIT, &ct->status)) ct 1701 net/netfilter/nf_conntrack_netlink.c static void ctnetlink_change_mark(struct nf_conn *ct, ct 1710 net/netfilter/nf_conntrack_netlink.c newmark = (ct->mark & mask) ^ mark; ct 1711 net/netfilter/nf_conntrack_netlink.c if (newmark != ct->mark) ct 1712 net/netfilter/nf_conntrack_netlink.c ct->mark = newmark; ct 1722 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_change_protoinfo(struct nf_conn *ct, ct 1735 net/netfilter/nf_conntrack_netlink.c l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); ct 1737 net/netfilter/nf_conntrack_netlink.c err = l4proto->from_nlattr(tb, ct); ct 1781 net/netfilter/nf_conntrack_netlink.c ctnetlink_change_seq_adj(struct nf_conn *ct, ct 1784 net/netfilter/nf_conntrack_netlink.c struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); ct 1790 net/netfilter/nf_conntrack_netlink.c spin_lock_bh(&ct->lock); ct 1797 net/netfilter/nf_conntrack_netlink.c set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); ct 1806 net/netfilter/nf_conntrack_netlink.c set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); ct 1809 net/netfilter/nf_conntrack_netlink.c spin_unlock_bh(&ct->lock); ct 1812 net/netfilter/nf_conntrack_netlink.c spin_unlock_bh(&ct->lock); ct 1822 net/netfilter/nf_conntrack_netlink.c static int ctnetlink_change_synproxy(struct nf_conn *ct, ct 1825 net/netfilter/nf_conntrack_netlink.c struct nf_conn_synproxy *synproxy = nfct_synproxy(ct); ct 1851 net/netfilter/nf_conntrack_netlink.c ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[]) ct 1869 net/netfilter/nf_conntrack_netlink.c return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len); ct 1876 net/netfilter/nf_conntrack_netlink.c ctnetlink_change_conntrack(struct nf_conn *ct, ct 1886 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_helper(ct, cda); ct 1892 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_timeout(ct, cda); ct 1898 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_status(ct, cda); ct 1904 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_protoinfo(ct, cda); ct 1911 net/netfilter/nf_conntrack_netlink.c ctnetlink_change_mark(ct, cda); ct 1915 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_seq_adj(ct, cda); ct 1921 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_synproxy(ct, cda); ct 1927 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_attach_labels(ct, cda); ct 1943 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct; ct 1949 net/netfilter/nf_conntrack_netlink.c ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); ct 1950 net/netfilter/nf_conntrack_netlink.c if (IS_ERR(ct)) ct 1959 net/netfilter/nf_conntrack_netlink.c ct->timeout = (u32)timeout + nfct_time_stamp; ct 1970 net/netfilter/nf_conntrack_netlink.c helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), ct 1971 net/netfilter/nf_conntrack_netlink.c nf_ct_protonum(ct)); ct 1982 net/netfilter/nf_conntrack_netlink.c nf_ct_l3num(ct), ct 1983 
net/netfilter/nf_conntrack_netlink.c nf_ct_protonum(ct)); ct 1995 net/netfilter/nf_conntrack_netlink.c help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); ct 2002 net/netfilter/nf_conntrack_netlink.c helper->from_nlattr(helpinfo, ct); ct 2009 net/netfilter/nf_conntrack_netlink.c err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); ct 2014 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_setup_nat(ct, cda); ct 2018 net/netfilter/nf_conntrack_netlink.c nf_ct_acct_ext_add(ct, GFP_ATOMIC); ct 2019 net/netfilter/nf_conntrack_netlink.c nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); ct 2020 net/netfilter/nf_conntrack_netlink.c nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); ct 2021 net/netfilter/nf_conntrack_netlink.c nf_ct_labels_ext_add(ct); ct 2022 net/netfilter/nf_conntrack_netlink.c nfct_seqadj_ext_add(ct); ct 2023 net/netfilter/nf_conntrack_netlink.c nfct_synproxy_ext_add(ct); ct 2026 net/netfilter/nf_conntrack_netlink.c ct->status |= IPS_CONFIRMED; ct 2029 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_status(ct, cda); ct 2035 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_seq_adj(ct, cda); ct 2040 net/netfilter/nf_conntrack_netlink.c memset(&ct->proto, 0, sizeof(ct->proto)); ct 2042 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_protoinfo(ct, cda); ct 2048 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_synproxy(ct, cda); ct 2055 net/netfilter/nf_conntrack_netlink.c ctnetlink_change_mark(ct, cda); ct 2075 net/netfilter/nf_conntrack_netlink.c __set_bit(IPS_EXPECTED_BIT, &ct->status); ct 2076 net/netfilter/nf_conntrack_netlink.c ct->master = master_ct; ct 2078 net/netfilter/nf_conntrack_netlink.c tstamp = nf_conn_tstamp_find(ct); ct 2082 net/netfilter/nf_conntrack_netlink.c err = nf_conntrack_hash_check_insert(ct); ct 2088 net/netfilter/nf_conntrack_netlink.c return ct; ct 2093 net/netfilter/nf_conntrack_netlink.c nf_conntrack_free(ct); ct 2106 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct; ct 2144 net/netfilter/nf_conntrack_netlink.c ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple, ct 2146 net/netfilter/nf_conntrack_netlink.c if (IS_ERR(ct)) ct 2147 net/netfilter/nf_conntrack_netlink.c return PTR_ERR(ct); ct 2150 net/netfilter/nf_conntrack_netlink.c if (test_bit(IPS_EXPECTED_BIT, &ct->status)) ct 2156 net/netfilter/nf_conntrack_netlink.c ctnetlink_attach_labels(ct, cda) == 0) ct 2167 net/netfilter/nf_conntrack_netlink.c ct, NETLINK_CB(skb).portid, ct 2169 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 2177 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); ct 2179 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_conntrack(ct, cda); ct 2189 net/netfilter/nf_conntrack_netlink.c ct, NETLINK_CB(skb).portid, ct 2194 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 2254 net/netfilter/nf_conntrack_netlink.c st = per_cpu_ptr(net->ct.stat, cpu); ct 2289 net/netfilter/nf_conntrack_netlink.c unsigned int nr_conntracks = atomic_read(&net->ct.count); ct 2364 net/netfilter/nf_conntrack_netlink.c ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct, ct 2371 net/netfilter/nf_conntrack_netlink.c ctnetlink_glue_build_size(const struct nf_conn *ct) ct 2383 net/netfilter/nf_conntrack_netlink.c + ctnetlink_secctx_size(ct) ct 2394 net/netfilter/nf_conntrack_netlink.c + ctnetlink_proto_size(ct) ct 2404 net/netfilter/nf_conntrack_netlink.c static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) ct 2409 net/netfilter/nf_conntrack_netlink.c zone = nf_ct_zone(ct); ct 2414 
net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) ct 2424 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) ct 2435 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_id(skb, ct) < 0) ct 2438 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_status(skb, ct) < 0) ct 2441 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_timeout(skb, ct) < 0) ct 2444 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_protoinfo(skb, ct) < 0) ct 2447 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_helpinfo(skb, ct) < 0) ct 2451 net/netfilter/nf_conntrack_netlink.c if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0) ct 2454 net/netfilter/nf_conntrack_netlink.c if (ct->master && ctnetlink_dump_master(skb, ct) < 0) ct 2457 net/netfilter/nf_conntrack_netlink.c if ((ct->status & IPS_SEQ_ADJUST) && ct 2458 net/netfilter/nf_conntrack_netlink.c ctnetlink_dump_ct_seq_adj(skb, ct) < 0) ct 2461 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_ct_synproxy(skb, ct) < 0) ct 2465 net/netfilter/nf_conntrack_netlink.c if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0) ct 2468 net/netfilter/nf_conntrack_netlink.c if (ctnetlink_dump_labels(skb, ct) < 0) ct 2477 net/netfilter/nf_conntrack_netlink.c ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct, ct 2487 net/netfilter/nf_conntrack_netlink.c if (__ctnetlink_glue_build(skb, ct) < 0) ct 2502 net/netfilter/nf_conntrack_netlink.c ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[]) ct 2505 net/netfilter/nf_conntrack_netlink.c unsigned long d = ct->status ^ status; ct 2521 net/netfilter/nf_conntrack_netlink.c __ctnetlink_change_status(ct, status, ~status); ct 2526 net/netfilter/nf_conntrack_netlink.c ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct) ct 2531 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_timeout(ct, cda); ct 2536 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_update_status(ct, cda); ct 2541 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_helper(ct, cda); ct 2546 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_attach_labels(ct, cda); ct 2552 net/netfilter/nf_conntrack_netlink.c ctnetlink_change_mark(ct, cda); ct 2559 net/netfilter/nf_conntrack_netlink.c ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct) ct 2569 net/netfilter/nf_conntrack_netlink.c return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct); ct 2573 net/netfilter/nf_conntrack_netlink.c const struct nf_conn *ct, ct 2580 net/netfilter/nf_conntrack_netlink.c nf_ct_l3num(ct), NULL); ct 2585 net/netfilter/nf_conntrack_netlink.c nf_ct_l3num(ct), NULL); ct 2589 net/netfilter/nf_conntrack_netlink.c ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, ct 2604 net/netfilter/nf_conntrack_netlink.c ct, &tuple, &mask); ct 2611 net/netfilter/nf_conntrack_netlink.c helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), ct 2612 net/netfilter/nf_conntrack_netlink.c nf_ct_protonum(ct)); ct 2617 net/netfilter/nf_conntrack_netlink.c exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct, ct 2627 net/netfilter/nf_conntrack_netlink.c static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct, ct 2630 net/netfilter/nf_conntrack_netlink.c if (!(ct->status & IPS_NAT_MASK)) ct 2633 net/netfilter/nf_conntrack_netlink.c nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff); ct 2944 
net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct = cb->data; ct 2945 net/netfilter/nf_conntrack_netlink.c struct nf_conn_help *help = nfct_help(ct); ct 2996 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct; ct 3016 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); ct 3018 net/netfilter/nf_conntrack_netlink.c if (!nfct_help(ct)) { ct 3019 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 3023 net/netfilter/nf_conntrack_netlink.c c.data = ct; ct 3026 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 3246 net/netfilter/nf_conntrack_netlink.c ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct, ct 3256 net/netfilter/nf_conntrack_netlink.c help = nfct_help(ct); ct 3265 net/netfilter/nf_conntrack_netlink.c exp = nf_ct_expect_alloc(ct); ct 3289 net/netfilter/nf_conntrack_netlink.c exp->master = ct; ct 3297 net/netfilter/nf_conntrack_netlink.c exp, nf_ct_l3num(ct)); ct 3317 net/netfilter/nf_conntrack_netlink.c struct nf_conn *ct; ct 3338 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h); ct 3345 net/netfilter/nf_conntrack_netlink.c nf_ct_protonum(ct)); ct 3355 net/netfilter/nf_conntrack_netlink.c nf_ct_protonum(ct)); ct 3367 net/netfilter/nf_conntrack_netlink.c exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); ct 3378 net/netfilter/nf_conntrack_netlink.c nf_ct_put(ct); ct 3477 net/netfilter/nf_conntrack_netlink.c st = per_cpu_ptr(net->ct.stat, cpu); ct 50 net/netfilter/nf_conntrack_pptp.c struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 57 net/netfilter/nf_conntrack_pptp.c struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 69 net/netfilter/nf_conntrack_pptp.c (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, ct 111 net/netfilter/nf_conntrack_pptp.c static void pptp_expectfn(struct nf_conn *ct, ct 114 net/netfilter/nf_conntrack_pptp.c struct net *net = nf_ct_net(ct); ct 119 net/netfilter/nf_conntrack_pptp.c ct->proto.gre.timeout = PPTP_GRE_TIMEOUT; ct 120 net/netfilter/nf_conntrack_pptp.c ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT; ct 126 net/netfilter/nf_conntrack_pptp.c if (nf_nat_pptp_expectfn && ct->master->status & IPS_NAT_MASK) ct 127 net/netfilter/nf_conntrack_pptp.c nf_nat_pptp_expectfn(ct, exp); ct 137 net/netfilter/nf_conntrack_pptp.c exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t); ct 149 net/netfilter/nf_conntrack_pptp.c static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct, ct 160 net/netfilter/nf_conntrack_pptp.c zone = nf_ct_zone(ct); ct 183 net/netfilter/nf_conntrack_pptp.c static void pptp_destroy_siblings(struct nf_conn *ct) ct 185 net/netfilter/nf_conntrack_pptp.c struct net *net = nf_ct_net(ct); ct 186 net/netfilter/nf_conntrack_pptp.c const struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); ct 189 net/netfilter/nf_conntrack_pptp.c nf_ct_gre_keymap_destroy(ct); ct 192 net/netfilter/nf_conntrack_pptp.c memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t)); ct 196 net/netfilter/nf_conntrack_pptp.c if (!destroy_sibling_or_exp(net, ct, &t)) ct 200 net/netfilter/nf_conntrack_pptp.c memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t)); ct 204 net/netfilter/nf_conntrack_pptp.c if (!destroy_sibling_or_exp(net, ct, &t)) ct 209 net/netfilter/nf_conntrack_pptp.c static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid) ct 216 net/netfilter/nf_conntrack_pptp.c exp_orig = nf_ct_expect_alloc(ct); ct 220 net/netfilter/nf_conntrack_pptp.c exp_reply = nf_ct_expect_alloc(ct); ct 227 
net/netfilter/nf_conntrack_pptp.c nf_ct_l3num(ct), ct 228 net/netfilter/nf_conntrack_pptp.c &ct->tuplehash[dir].tuple.src.u3, ct 229 net/netfilter/nf_conntrack_pptp.c &ct->tuplehash[dir].tuple.dst.u3, ct 236 net/netfilter/nf_conntrack_pptp.c nf_ct_l3num(ct), ct 237 net/netfilter/nf_conntrack_pptp.c &ct->tuplehash[dir].tuple.src.u3, ct 238 net/netfilter/nf_conntrack_pptp.c &ct->tuplehash[dir].tuple.dst.u3, ct 243 net/netfilter/nf_conntrack_pptp.c if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK) ct 251 net/netfilter/nf_conntrack_pptp.c if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_ORIGINAL, &exp_orig->tuple) != 0) ct 253 net/netfilter/nf_conntrack_pptp.c if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_REPLY, &exp_reply->tuple) != 0) { ct 254 net/netfilter/nf_conntrack_pptp.c nf_ct_gre_keymap_destroy(ct); ct 278 net/netfilter/nf_conntrack_pptp.c struct nf_conn *ct, ct 281 net/netfilter/nf_conntrack_pptp.c struct nf_ct_pptp_master *info = nfct_help_data(ct); ct 328 net/netfilter/nf_conntrack_pptp.c exp_gre(ct, cid, pcid); ct 362 net/netfilter/nf_conntrack_pptp.c exp_gre(ct, cid, pcid); ct 372 net/netfilter/nf_conntrack_pptp.c pptp_destroy_siblings(ct); ct 387 net/netfilter/nf_conntrack_pptp.c if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK) ct 388 net/netfilter/nf_conntrack_pptp.c return nf_nat_pptp_inbound(skb, ct, ctinfo, ct 406 net/netfilter/nf_conntrack_pptp.c struct nf_conn *ct, ct 409 net/netfilter/nf_conntrack_pptp.c struct nf_ct_pptp_master *info = nfct_help_data(ct); ct 483 net/netfilter/nf_conntrack_pptp.c if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK) ct 484 net/netfilter/nf_conntrack_pptp.c return nf_nat_pptp_outbound(skb, ct, ctinfo, ct 516 net/netfilter/nf_conntrack_pptp.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 520 net/netfilter/nf_conntrack_pptp.c const struct nf_ct_pptp_master *info = nfct_help_data(ct); ct 534 net/netfilter/nf_conntrack_pptp.c if (!nf_ct_is_confirmed(ct) && (ct->status & IPS_NAT_MASK)) { ct 535 net/netfilter/nf_conntrack_pptp.c struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT); ct 537 net/netfilter/nf_conntrack_pptp.c if (!nat && !nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC)) ct 592 net/netfilter/nf_conntrack_pptp.c ret = pptp_outbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct, ct 596 net/netfilter/nf_conntrack_pptp.c ret = pptp_inbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct, ct 59 net/netfilter/nf_conntrack_proto.c if (net->ct.sysctl_log_invalid != protonum && ct 60 net/netfilter/nf_conntrack_proto.c net->ct.sysctl_log_invalid != IPPROTO_RAW) ct 75 net/netfilter/nf_conntrack_proto.c const struct nf_conn *ct, ct 82 net/netfilter/nf_conntrack_proto.c net = nf_ct_net(ct); ct 83 net/netfilter/nf_conntrack_proto.c if (likely(net->ct.sysctl_log_invalid == 0)) ct 90 net/netfilter/nf_conntrack_proto.c nf_l4proto_log_invalid(skb, net, nf_ct_l3num(ct), ct 91 net/netfilter/nf_conntrack_proto.c nf_ct_protonum(ct), "%pV", &vaf); ct 125 net/netfilter/nf_conntrack_proto.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 129 net/netfilter/nf_conntrack_proto.c help = nfct_help(ct); ct 139 net/netfilter/nf_conntrack_proto.c ct, ctinfo); ct 145 net/netfilter/nf_conntrack_proto.c if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && ct 147 net/netfilter/nf_conntrack_proto.c if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { ct 148 net/netfilter/nf_conntrack_proto.c NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); ct 163 net/netfilter/nf_conntrack_proto.c struct nf_conn *ct; ct 165 net/netfilter/nf_conntrack_proto.c ct = nf_ct_get(skb, &ctinfo); ct 
166 net/netfilter/nf_conntrack_proto.c if (!ct || ctinfo == IP_CT_RELATED_REPLY) ct 171 net/netfilter/nf_conntrack_proto.c ct, ctinfo); ct 272 net/netfilter/nf_conntrack_proto.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); ct 275 net/netfilter/nf_conntrack_proto.c sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL] ct 277 net/netfilter/nf_conntrack_proto.c sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] ct 283 net/netfilter/nf_conntrack_proto.c nf_ct_put(ct); ct 312 net/netfilter/nf_conntrack_proto.c struct nf_conn *ct; ct 341 net/netfilter/nf_conntrack_proto.c ct = nf_ct_tuplehash_to_ctrack(h); ct 344 net/netfilter/nf_conntrack_proto.c sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port; ct 347 net/netfilter/nf_conntrack_proto.c &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6, ct 350 net/netfilter/nf_conntrack_proto.c nf_ct_put(ct); ct 367 net/netfilter/nf_conntrack_proto.c struct nf_conn *ct; ct 373 net/netfilter/nf_conntrack_proto.c ct = nf_ct_get(skb, &ctinfo); ct 374 net/netfilter/nf_conntrack_proto.c if (!ct || ctinfo == IP_CT_RELATED_REPLY) ct 384 net/netfilter/nf_conntrack_proto.c return nf_confirm(skb, protoff, ct, ctinfo); ct 429 net/netfilter/nf_conntrack_proto.c static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto) ct 433 net/netfilter/nf_conntrack_proto.c if (nf_ct_l3num(ct) != nfproto) ct 436 net/netfilter/nf_conntrack_proto.c if (nf_ct_protonum(ct) == IPPROTO_TCP && ct 437 net/netfilter/nf_conntrack_proto.c ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) { ct 438 net/netfilter/nf_conntrack_proto.c ct->proto.tcp.seen[0].td_maxwin = 0; ct 439 net/netfilter/nf_conntrack_proto.c ct->proto.tcp.seen[1].td_maxwin = 0; ct 384 net/netfilter/nf_conntrack_proto_dccp.c dccp_new(struct nf_conn *ct, const struct sk_buff *skb, ct 387 net/netfilter/nf_conntrack_proto_dccp.c struct net *net = nf_ct_net(ct); ct 407 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; ct 408 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; ct 409 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.state = CT_DCCP_NONE; ct 410 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST; ct 411 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL; ct 412 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.handshake_seq = 0; ct 416 net/netfilter/nf_conntrack_proto_dccp.c nf_ct_l4proto_log_invalid(skb, ct, "%s", msg); ct 453 net/netfilter/nf_conntrack_proto_dccp.c state->net->ct.sysctl_checksum && ct 471 net/netfilter/nf_conntrack_proto_dccp.c int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb, ct 490 net/netfilter/nf_conntrack_proto_dccp.c if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh)) ct 494 net/netfilter/nf_conntrack_proto_dccp.c !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { ct 496 net/netfilter/nf_conntrack_proto_dccp.c nf_ct_kill_acct(ct, ctinfo, skb); ct 500 net/netfilter/nf_conntrack_proto_dccp.c spin_lock_bh(&ct->lock); ct 502 net/netfilter/nf_conntrack_proto_dccp.c role = ct->proto.dccp.role[dir]; ct 503 net/netfilter/nf_conntrack_proto_dccp.c old_state = ct->proto.dccp.state; ct 512 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT; ct 513 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER; ct 518 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh); ct 523 
net/netfilter/nf_conntrack_proto_dccp.c dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq) ct 524 net/netfilter/nf_conntrack_proto_dccp.c set_bit(IPS_ASSURED_BIT, &ct->status); ct 532 net/netfilter/nf_conntrack_proto_dccp.c if (ct->proto.dccp.last_dir == !dir && ct 533 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST && ct 535 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT; ct 536 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER; ct 537 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh); ct 541 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.last_dir = dir; ct 542 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.last_pkt = type; ct 544 net/netfilter/nf_conntrack_proto_dccp.c spin_unlock_bh(&ct->lock); ct 545 net/netfilter/nf_conntrack_proto_dccp.c nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid packet"); ct 548 net/netfilter/nf_conntrack_proto_dccp.c spin_unlock_bh(&ct->lock); ct 549 net/netfilter/nf_conntrack_proto_dccp.c nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid state transition"); ct 553 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.last_dir = dir; ct 554 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.last_pkt = type; ct 555 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.state = new_state; ct 556 net/netfilter/nf_conntrack_proto_dccp.c spin_unlock_bh(&ct->lock); ct 559 net/netfilter/nf_conntrack_proto_dccp.c nf_conntrack_event_cache(IPCT_PROTOINFO, ct); ct 561 net/netfilter/nf_conntrack_proto_dccp.c timeouts = nf_ct_timeout_lookup(ct); ct 563 net/netfilter/nf_conntrack_proto_dccp.c timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout; ct 564 net/netfilter/nf_conntrack_proto_dccp.c nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); ct 569 net/netfilter/nf_conntrack_proto_dccp.c static bool dccp_can_early_drop(const struct nf_conn *ct) ct 571 net/netfilter/nf_conntrack_proto_dccp.c switch (ct->proto.dccp.state) { ct 584 net/netfilter/nf_conntrack_proto_dccp.c static void dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct) ct 586 net/netfilter/nf_conntrack_proto_dccp.c seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]); ct 592 net/netfilter/nf_conntrack_proto_dccp.c struct nf_conn *ct) ct 596 net/netfilter/nf_conntrack_proto_dccp.c spin_lock_bh(&ct->lock); ct 600 net/netfilter/nf_conntrack_proto_dccp.c if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) || ct 602 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) || ct 604 net/netfilter/nf_conntrack_proto_dccp.c cpu_to_be64(ct->proto.dccp.handshake_seq), ct 608 net/netfilter/nf_conntrack_proto_dccp.c spin_unlock_bh(&ct->lock); ct 612 net/netfilter/nf_conntrack_proto_dccp.c spin_unlock_bh(&ct->lock); ct 629 net/netfilter/nf_conntrack_proto_dccp.c static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) ct 650 net/netfilter/nf_conntrack_proto_dccp.c spin_lock_bh(&ct->lock); ct 651 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); ct 653 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; ct 654 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; ct 656 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER; ct 657 net/netfilter/nf_conntrack_proto_dccp.c 
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT; ct 660 net/netfilter/nf_conntrack_proto_dccp.c ct->proto.dccp.handshake_seq = ct 663 net/netfilter/nf_conntrack_proto_dccp.c spin_unlock_bh(&ct->lock); ct 55 net/netfilter/nf_conntrack_proto_gre.c return &net->ct.nf_ct_proto.gre; ct 102 net/netfilter/nf_conntrack_proto_gre.c int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, ct 105 net/netfilter/nf_conntrack_proto_gre.c struct net *net = nf_ct_net(ct); ct 107 net/netfilter/nf_conntrack_proto_gre.c struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); ct 118 net/netfilter/nf_conntrack_proto_gre.c dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct); ct 140 net/netfilter/nf_conntrack_proto_gre.c void nf_ct_gre_keymap_destroy(struct nf_conn *ct) ct 142 net/netfilter/nf_conntrack_proto_gre.c struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); ct 145 net/netfilter/nf_conntrack_proto_gre.c pr_debug("entering for ct %p\n", ct); ct 201 net/netfilter/nf_conntrack_proto_gre.c static void gre_print_conntrack(struct seq_file *s, struct nf_conn *ct) ct 204 net/netfilter/nf_conntrack_proto_gre.c (ct->proto.gre.timeout / HZ), ct 205 net/netfilter/nf_conntrack_proto_gre.c (ct->proto.gre.stream_timeout / HZ)); ct 215 net/netfilter/nf_conntrack_proto_gre.c int nf_conntrack_gre_packet(struct nf_conn *ct, ct 224 net/netfilter/nf_conntrack_proto_gre.c if (!nf_ct_is_confirmed(ct)) { ct 225 net/netfilter/nf_conntrack_proto_gre.c unsigned int *timeouts = nf_ct_timeout_lookup(ct); ct 228 net/netfilter/nf_conntrack_proto_gre.c timeouts = gre_get_timeouts(nf_ct_net(ct)); ct 232 net/netfilter/nf_conntrack_proto_gre.c ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED]; ct 233 net/netfilter/nf_conntrack_proto_gre.c ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED]; ct 238 net/netfilter/nf_conntrack_proto_gre.c if (ct->status & IPS_SEEN_REPLY) { ct 239 net/netfilter/nf_conntrack_proto_gre.c nf_ct_refresh_acct(ct, ctinfo, skb, ct 240 net/netfilter/nf_conntrack_proto_gre.c ct->proto.gre.stream_timeout); ct 242 net/netfilter/nf_conntrack_proto_gre.c if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) ct 243 net/netfilter/nf_conntrack_proto_gre.c nf_conntrack_event_cache(IPCT_ASSURED, ct); ct 245 net/netfilter/nf_conntrack_proto_gre.c nf_ct_refresh_acct(ct, ctinfo, skb, ct 246 net/netfilter/nf_conntrack_proto_gre.c ct->proto.gre.timeout); ct 68 net/netfilter/nf_conntrack_proto_icmp.c int nf_conntrack_icmp_packet(struct nf_conn *ct, ct 76 net/netfilter/nf_conntrack_proto_icmp.c unsigned int *timeout = nf_ct_timeout_lookup(ct); ct 87 net/netfilter/nf_conntrack_proto_icmp.c if (ct->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) || ct 88 net/netfilter/nf_conntrack_proto_icmp.c !valid_new[ct->tuplehash[0].tuple.dst.u.icmp.type]) { ct 91 net/netfilter/nf_conntrack_proto_icmp.c ct->tuplehash[0].tuple.dst.u.icmp.type); ct 92 net/netfilter/nf_conntrack_proto_icmp.c nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple); ct 97 net/netfilter/nf_conntrack_proto_icmp.c timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout; ct 99 net/netfilter/nf_conntrack_proto_icmp.c nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); ct 116 net/netfilter/nf_conntrack_proto_icmp.c struct nf_conn *ct; ct 166 net/netfilter/nf_conntrack_proto_icmp.c ct = nf_ct_tuplehash_to_ctrack(h); ct 168 net/netfilter/nf_conntrack_proto_icmp.c ct_daddr = &ct->tuplehash[dir].tuple.dst.u3; ct 181 net/netfilter/nf_conntrack_proto_icmp.c nf_ct_put(ct); ct 190 net/netfilter/nf_conntrack_proto_icmp.c nf_ct_set(skb, ct, ctinfo); ct 219 
net/netfilter/nf_conntrack_proto_icmp.c if (state->net->ct.sysctl_checksum && ct 85 net/netfilter/nf_conntrack_proto_icmpv6.c int nf_conntrack_icmpv6_packet(struct nf_conn *ct, ct 90 net/netfilter/nf_conntrack_proto_icmpv6.c unsigned int *timeout = nf_ct_timeout_lookup(ct); ct 99 net/netfilter/nf_conntrack_proto_icmpv6.c if (!nf_ct_is_confirmed(ct)) { ct 100 net/netfilter/nf_conntrack_proto_icmpv6.c int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128; ct 106 net/netfilter/nf_conntrack_proto_icmpv6.c nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple); ct 112 net/netfilter/nf_conntrack_proto_icmpv6.c timeout = icmpv6_get_timeouts(nf_ct_net(ct)); ct 117 net/netfilter/nf_conntrack_proto_icmpv6.c nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); ct 148 net/netfilter/nf_conntrack_proto_icmpv6.c state->net->ct.sysctl_checksum && ct 148 net/netfilter/nf_conntrack_proto_sctp.c static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) ct 150 net/netfilter/nf_conntrack_proto_sctp.c seq_printf(s, "%s ", sctp_conntrack_names[ct->proto.sctp.state]); ct 161 net/netfilter/nf_conntrack_proto_sctp.c static int do_basic_checks(struct nf_conn *ct, ct 270 net/netfilter/nf_conntrack_proto_sctp.c sctp_new(struct nf_conn *ct, const struct sk_buff *skb, ct 278 net/netfilter/nf_conntrack_proto_sctp.c memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp)); ct 306 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag; ct 310 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag; ct 316 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; ct 319 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.state = SCTP_CONNTRACK_NONE; ct 337 net/netfilter/nf_conntrack_proto_sctp.c state->net->ct.sysctl_checksum && ct 357 net/netfilter/nf_conntrack_proto_sctp.c int nf_conntrack_sctp_packet(struct nf_conn *ct, ct 380 net/netfilter/nf_conntrack_proto_sctp.c if (do_basic_checks(ct, skb, dataoff, map) != 0) ct 383 net/netfilter/nf_conntrack_proto_sctp.c if (!nf_ct_is_confirmed(ct)) { ct 390 net/netfilter/nf_conntrack_proto_sctp.c if (!sctp_new(ct, skb, sh, dataoff)) ct 402 net/netfilter/nf_conntrack_proto_sctp.c sh->vtag != ct->proto.sctp.vtag[dir]) { ct 408 net/netfilter/nf_conntrack_proto_sctp.c spin_lock_bh(&ct->lock); ct 417 net/netfilter/nf_conntrack_proto_sctp.c if (sh->vtag != ct->proto.sctp.vtag[dir] && ct 418 net/netfilter/nf_conntrack_proto_sctp.c sh->vtag != ct->proto.sctp.vtag[!dir]) ct 422 net/netfilter/nf_conntrack_proto_sctp.c if (sh->vtag != ct->proto.sctp.vtag[dir] && ct 423 net/netfilter/nf_conntrack_proto_sctp.c sh->vtag != ct->proto.sctp.vtag[!dir] && ct 428 net/netfilter/nf_conntrack_proto_sctp.c if (sh->vtag != ct->proto.sctp.vtag[dir]) ct 432 net/netfilter/nf_conntrack_proto_sctp.c if (ct->proto.sctp.vtag[dir] == 0) { ct 435 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[dir] = sh->vtag; ct 436 net/netfilter/nf_conntrack_proto_sctp.c } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { ct 442 net/netfilter/nf_conntrack_proto_sctp.c old_state = ct->proto.sctp.state; ct 464 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[!dir] = ih->init_tag; ct 467 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.state = new_state; ct 469 net/netfilter/nf_conntrack_proto_sctp.c nf_conntrack_event_cache(IPCT_PROTOINFO, ct); ct 471 net/netfilter/nf_conntrack_proto_sctp.c spin_unlock_bh(&ct->lock); ct 473 net/netfilter/nf_conntrack_proto_sctp.c timeouts = nf_ct_timeout_lookup(ct); 
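A minimal illustrative sketch of the timeout-refresh pattern that the surrounding nf_conntrack_proto_*.c entries reference: look up a per-conntrack timeout policy, fall back to defaults, refresh the timer, and raise ASSURED once replies have been seen. The handler name, example_timeouts[] and EXAMPLE_ESTABLISHED are placeholders; nf_ct_timeout_lookup(), nf_ct_refresh_acct(), the IPS_* status bits and nf_conntrack_event_cache() are the helpers quoted in the entries above and below.

/*
 * Illustrative sketch only (not a line from the listing).  Placeholder
 * names: example_l4_packet, example_timeouts[], EXAMPLE_ESTABLISHED.
 */
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_ecache.h>

enum { EXAMPLE_ESTABLISHED };

static unsigned int example_timeouts[] = {	/* stand-in for per-netns defaults */
	[EXAMPLE_ESTABLISHED]	= 5 * 60 * HZ,
};

static int example_l4_packet(struct nf_conn *ct, struct sk_buff *skb,
			     enum ip_conntrack_info ctinfo)
{
	/* Per-conntrack timeout policy, if one was attached as an extension. */
	unsigned int *timeouts = nf_ct_timeout_lookup(ct);

	if (!timeouts)
		timeouts = example_timeouts;

	/* Bump the connection timer (and accounting) for this packet. */
	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[EXAMPLE_ESTABLISHED]);

	/* Once traffic has been seen in both directions, mark the flow
	 * ASSURED exactly once and emit the corresponding ctnetlink event. */
	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status) &&
	    !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_ASSURED, ct);

	return NF_ACCEPT;
}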
ct 475 net/netfilter/nf_conntrack_proto_sctp.c timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts; ct 477 net/netfilter/nf_conntrack_proto_sctp.c nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); ct 483 net/netfilter/nf_conntrack_proto_sctp.c set_bit(IPS_ASSURED_BIT, &ct->status); ct 484 net/netfilter/nf_conntrack_proto_sctp.c nf_conntrack_event_cache(IPCT_ASSURED, ct); ct 490 net/netfilter/nf_conntrack_proto_sctp.c spin_unlock_bh(&ct->lock); ct 495 net/netfilter/nf_conntrack_proto_sctp.c static bool sctp_can_early_drop(const struct nf_conn *ct) ct 497 net/netfilter/nf_conntrack_proto_sctp.c switch (ct->proto.sctp.state) { ct 515 net/netfilter/nf_conntrack_proto_sctp.c struct nf_conn *ct) ct 519 net/netfilter/nf_conntrack_proto_sctp.c spin_lock_bh(&ct->lock); ct 524 net/netfilter/nf_conntrack_proto_sctp.c if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) || ct 526 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) || ct 528 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[IP_CT_DIR_REPLY])) ct 531 net/netfilter/nf_conntrack_proto_sctp.c spin_unlock_bh(&ct->lock); ct 538 net/netfilter/nf_conntrack_proto_sctp.c spin_unlock_bh(&ct->lock); ct 553 net/netfilter/nf_conntrack_proto_sctp.c static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) ct 573 net/netfilter/nf_conntrack_proto_sctp.c spin_lock_bh(&ct->lock); ct 574 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]); ct 575 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = ct 577 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ct 579 net/netfilter/nf_conntrack_proto_sctp.c spin_unlock_bh(&ct->lock); ct 274 net/netfilter/nf_conntrack_proto_tcp.c static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) ct 276 net/netfilter/nf_conntrack_proto_tcp.c if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) ct 279 net/netfilter/nf_conntrack_proto_tcp.c seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]); ct 461 net/netfilter/nf_conntrack_proto_tcp.c static bool tcp_in_window(const struct nf_conn *ct, ct 469 net/netfilter/nf_conntrack_proto_tcp.c struct net *net = nf_ct_net(ct); ct 473 net/netfilter/nf_conntrack_proto_tcp.c const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; ct 492 net/netfilter/nf_conntrack_proto_tcp.c receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1); ct 678 net/netfilter/nf_conntrack_proto_tcp.c nf_ct_l4proto_log_invalid(skb, ct, ct 741 net/netfilter/nf_conntrack_proto_tcp.c if (state->net->ct.sysctl_checksum && ct 758 net/netfilter/nf_conntrack_proto_tcp.c static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, ct 763 net/netfilter/nf_conntrack_proto_tcp.c struct net *net = nf_ct_net(ct); ct 765 net/netfilter/nf_conntrack_proto_tcp.c const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0]; ct 766 net/netfilter/nf_conntrack_proto_tcp.c const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1]; ct 778 net/netfilter/nf_conntrack_proto_tcp.c memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp)); ct 780 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_end = ct 783 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window); ct 784 net/netfilter/nf_conntrack_proto_tcp.c if (ct->proto.tcp.seen[0].td_maxwin == 0) ct 785 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_maxwin = 1; ct 786 net/netfilter/nf_conntrack_proto_tcp.c 
ct->proto.tcp.seen[0].td_maxend = ct 787 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_end; ct 789 net/netfilter/nf_conntrack_proto_tcp.c tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]); ct 794 net/netfilter/nf_conntrack_proto_tcp.c memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp)); ct 800 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_end = ct 803 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window); ct 804 net/netfilter/nf_conntrack_proto_tcp.c if (ct->proto.tcp.seen[0].td_maxwin == 0) ct 805 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_maxwin = 1; ct 806 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_maxend = ct 807 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_end + ct 808 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_maxwin; ct 812 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].flags = ct 813 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | ct 818 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_index = TCP_NONE_SET; ct 830 net/netfilter/nf_conntrack_proto_tcp.c static bool nf_conntrack_tcp_established(const struct nf_conn *ct) ct 832 net/netfilter/nf_conntrack_proto_tcp.c return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED && ct 833 net/netfilter/nf_conntrack_proto_tcp.c test_bit(IPS_ASSURED_BIT, &ct->status); ct 837 net/netfilter/nf_conntrack_proto_tcp.c int nf_conntrack_tcp_packet(struct nf_conn *ct, ct 843 net/netfilter/nf_conntrack_proto_tcp.c struct net *net = nf_ct_net(ct); ct 860 net/netfilter/nf_conntrack_proto_tcp.c if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th)) ct 863 net/netfilter/nf_conntrack_proto_tcp.c spin_lock_bh(&ct->lock); ct 864 net/netfilter/nf_conntrack_proto_tcp.c old_state = ct->proto.tcp.state; ct 868 net/netfilter/nf_conntrack_proto_tcp.c tuple = &ct->tuplehash[dir].tuple; ct 886 net/netfilter/nf_conntrack_proto_tcp.c if (((ct->proto.tcp.seen[dir].flags ct 887 net/netfilter/nf_conntrack_proto_tcp.c | ct->proto.tcp.seen[!dir].flags) ct 889 net/netfilter/nf_conntrack_proto_tcp.c || (ct->proto.tcp.last_dir == dir ct 890 net/netfilter/nf_conntrack_proto_tcp.c && ct->proto.tcp.last_index == TCP_RST_SET)) { ct 893 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 899 net/netfilter/nf_conntrack_proto_tcp.c if (nf_ct_kill(ct)) ct 919 net/netfilter/nf_conntrack_proto_tcp.c && ct->proto.tcp.last_index == TCP_SYN_SET ct 920 net/netfilter/nf_conntrack_proto_tcp.c && ct->proto.tcp.last_dir != dir ct 921 net/netfilter/nf_conntrack_proto_tcp.c && ntohl(th->ack_seq) == ct->proto.tcp.last_end) { ct 930 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end = ct 931 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_end; ct 932 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend = ct 933 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_end; ct 934 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin = ct 935 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_win == 0 ? 
ct 936 net/netfilter/nf_conntrack_proto_tcp.c 1 : ct->proto.tcp.last_win; ct 937 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale = ct 938 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_wscale; ct 939 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK; ct 940 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags = ct 941 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_flags; ct 942 net/netfilter/nf_conntrack_proto_tcp.c memset(&ct->proto.tcp.seen[dir], 0, ct 946 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_index = index; ct 947 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_dir = dir; ct 948 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_seq = ntohl(th->seq); ct 949 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_end = ct 951 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_win = ntohs(th->window); ct 963 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_flags = ct 964 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_wscale = 0; ct 967 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_flags |= ct 969 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_wscale = seen.td_scale; ct 972 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_flags |= ct 980 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_flags |= ct 983 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 984 net/netfilter/nf_conntrack_proto_tcp.c nf_ct_l4proto_log_invalid(skb, ct, "invalid packet ignored in " ct 994 net/netfilter/nf_conntrack_proto_tcp.c if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT && ct 996 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL && ct 997 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) { ct 999 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 1006 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 1007 net/netfilter/nf_conntrack_proto_tcp.c nf_ct_l4proto_log_invalid(skb, ct, "invalid state"); ct 1016 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_dir != dir && ct 1017 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_index == TCP_SYN_SET && ct 1018 net/netfilter/nf_conntrack_proto_tcp.c (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) { ct 1020 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK; ct 1021 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 1022 net/netfilter/nf_conntrack_proto_tcp.c nf_ct_l4proto_log_invalid(skb, ct, "challenge-ack ignored"); ct 1030 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN; ct 1034 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN) ct 1041 net/netfilter/nf_conntrack_proto_tcp.c if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) { ct 1044 net/netfilter/nf_conntrack_proto_tcp.c if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) { ct 1046 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 1047 net/netfilter/nf_conntrack_proto_tcp.c nf_ct_l4proto_log_invalid(skb, ct, "invalid rst"); ct 1051 net/netfilter/nf_conntrack_proto_tcp.c if (!nf_conntrack_tcp_established(ct) || ct 1052 net/netfilter/nf_conntrack_proto_tcp.c seq == ct->proto.tcp.seen[!dir].td_maxack) ct 1059 
net/netfilter/nf_conntrack_proto_tcp.c if (ct->proto.tcp.last_index == TCP_ACK_SET && ct 1060 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_dir == dir && ct 1061 net/netfilter/nf_conntrack_proto_tcp.c seq == ct->proto.tcp.last_end) ct 1069 net/netfilter/nf_conntrack_proto_tcp.c if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ct 1070 net/netfilter/nf_conntrack_proto_tcp.c && ct->proto.tcp.last_index == TCP_SYN_SET) ct 1071 net/netfilter/nf_conntrack_proto_tcp.c || (!test_bit(IPS_ASSURED_BIT, &ct->status) ct 1072 net/netfilter/nf_conntrack_proto_tcp.c && ct->proto.tcp.last_index == TCP_ACK_SET)) ct 1073 net/netfilter/nf_conntrack_proto_tcp.c && ntohl(th->ack_seq) == ct->proto.tcp.last_end) { ct 1091 net/netfilter/nf_conntrack_proto_tcp.c if (!tcp_in_window(ct, &ct->proto.tcp, dir, index, ct 1093 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 1098 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_index = index; ct 1099 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.last_dir = dir; ct 1108 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.state = new_state; ct 1111 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; ct 1113 net/netfilter/nf_conntrack_proto_tcp.c timeouts = nf_ct_timeout_lookup(ct); ct 1117 net/netfilter/nf_conntrack_proto_tcp.c if (ct->proto.tcp.retrans >= tn->tcp_max_retrans && ct 1122 net/netfilter/nf_conntrack_proto_tcp.c else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) & ct 1126 net/netfilter/nf_conntrack_proto_tcp.c else if (ct->proto.tcp.last_win == 0 && ct 1131 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 1134 net/netfilter/nf_conntrack_proto_tcp.c nf_conntrack_event_cache(IPCT_PROTOINFO, ct); ct 1136 net/netfilter/nf_conntrack_proto_tcp.c if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { ct 1142 net/netfilter/nf_conntrack_proto_tcp.c nf_ct_kill_acct(ct, ctinfo, skb); ct 1151 net/netfilter/nf_conntrack_proto_tcp.c } else if (!test_bit(IPS_ASSURED_BIT, &ct->status) ct 1158 net/netfilter/nf_conntrack_proto_tcp.c set_bit(IPS_ASSURED_BIT, &ct->status); ct 1159 net/netfilter/nf_conntrack_proto_tcp.c nf_conntrack_event_cache(IPCT_ASSURED, ct); ct 1161 net/netfilter/nf_conntrack_proto_tcp.c nf_ct_refresh_acct(ct, ctinfo, skb, timeout); ct 1166 net/netfilter/nf_conntrack_proto_tcp.c static bool tcp_can_early_drop(const struct nf_conn *ct) ct 1168 net/netfilter/nf_conntrack_proto_tcp.c switch (ct->proto.tcp.state) { ct 1188 net/netfilter/nf_conntrack_proto_tcp.c struct nf_conn *ct) ct 1193 net/netfilter/nf_conntrack_proto_tcp.c spin_lock_bh(&ct->lock); ct 1198 net/netfilter/nf_conntrack_proto_tcp.c if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) || ct 1200 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_scale) || ct 1202 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[1].td_scale)) ct 1205 net/netfilter/nf_conntrack_proto_tcp.c tmp.flags = ct->proto.tcp.seen[0].flags; ct 1210 net/netfilter/nf_conntrack_proto_tcp.c tmp.flags = ct->proto.tcp.seen[1].flags; ct 1214 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 1221 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 1239 net/netfilter/nf_conntrack_proto_tcp.c static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct) ct 1259 net/netfilter/nf_conntrack_proto_tcp.c spin_lock_bh(&ct->lock); ct 1261 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]); ct 
1266 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].flags &= ~attr->mask; ct 1267 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask; ct 1273 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[1].flags &= ~attr->mask; ct 1274 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask; ct 1279 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE && ct 1280 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) { ct 1281 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[0].td_scale = ct 1283 net/netfilter/nf_conntrack_proto_tcp.c ct->proto.tcp.seen[1].td_scale = ct 1286 net/netfilter/nf_conntrack_proto_tcp.c spin_unlock_bh(&ct->lock); ct 75 net/netfilter/nf_conntrack_proto_udp.c state->net->ct.sysctl_checksum && ct 85 net/netfilter/nf_conntrack_proto_udp.c int nf_conntrack_udp_packet(struct nf_conn *ct, ct 96 net/netfilter/nf_conntrack_proto_udp.c timeouts = nf_ct_timeout_lookup(ct); ct 98 net/netfilter/nf_conntrack_proto_udp.c timeouts = udp_get_timeouts(nf_ct_net(ct)); ct 100 net/netfilter/nf_conntrack_proto_udp.c if (!nf_ct_is_confirmed(ct)) ct 101 net/netfilter/nf_conntrack_proto_udp.c ct->proto.udp.stream_ts = 2 * HZ + jiffies; ct 106 net/netfilter/nf_conntrack_proto_udp.c if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { ct 110 net/netfilter/nf_conntrack_proto_udp.c if (time_after(jiffies, ct->proto.udp.stream_ts)) ct 113 net/netfilter/nf_conntrack_proto_udp.c nf_ct_refresh_acct(ct, ctinfo, skb, extra); ct 116 net/netfilter/nf_conntrack_proto_udp.c if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) ct 117 net/netfilter/nf_conntrack_proto_udp.c nf_conntrack_event_cache(IPCT_ASSURED, ct); ct 119 net/netfilter/nf_conntrack_proto_udp.c nf_ct_refresh_acct(ct, ctinfo, skb, ct 166 net/netfilter/nf_conntrack_proto_udp.c state->net->ct.sysctl_checksum && ct 177 net/netfilter/nf_conntrack_proto_udp.c int nf_conntrack_udplite_packet(struct nf_conn *ct, ct 188 net/netfilter/nf_conntrack_proto_udp.c timeouts = nf_ct_timeout_lookup(ct); ct 190 net/netfilter/nf_conntrack_proto_udp.c timeouts = udp_get_timeouts(nf_ct_net(ct)); ct 194 net/netfilter/nf_conntrack_proto_udp.c if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { ct 195 net/netfilter/nf_conntrack_proto_udp.c nf_ct_refresh_acct(ct, ctinfo, skb, ct 198 net/netfilter/nf_conntrack_proto_udp.c if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) ct 199 net/netfilter/nf_conntrack_proto_udp.c nf_conntrack_event_cache(IPCT_ASSURED, ct); ct 201 net/netfilter/nf_conntrack_proto_udp.c nf_ct_refresh_acct(ct, ctinfo, skb, ct 64 net/netfilter/nf_conntrack_sane.c struct nf_conn *ct, ct 73 net/netfilter/nf_conntrack_sane.c struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct); ct 140 net/netfilter/nf_conntrack_sane.c exp = nf_ct_expect_alloc(ct); ct 142 net/netfilter/nf_conntrack_sane.c nf_ct_helper_log(skb, ct, "cannot alloc expectation"); ct 147 net/netfilter/nf_conntrack_sane.c tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 148 net/netfilter/nf_conntrack_sane.c nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), ct 157 net/netfilter/nf_conntrack_sane.c nf_ct_helper_log(skb, ct, "cannot add expectation"); ct 10 net/netfilter/nf_conntrack_seqadj.c int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 20 net/netfilter/nf_conntrack_seqadj.c set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); ct 22 
net/netfilter/nf_conntrack_seqadj.c seqadj = nfct_seqadj(ct); ct 30 net/netfilter/nf_conntrack_seqadj.c int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 33 net/netfilter/nf_conntrack_seqadj.c struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); ct 45 net/netfilter/nf_conntrack_seqadj.c set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); ct 47 net/netfilter/nf_conntrack_seqadj.c spin_lock_bh(&ct->lock); ct 55 net/netfilter/nf_conntrack_seqadj.c spin_unlock_bh(&ct->lock); ct 61 net/netfilter/nf_conntrack_seqadj.c struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 66 net/netfilter/nf_conntrack_seqadj.c if (nf_ct_protonum(ct) != IPPROTO_TCP) ct 70 net/netfilter/nf_conntrack_seqadj.c nf_ct_seqadj_set(ct, ctinfo, th->seq, off); ct 119 net/netfilter/nf_conntrack_seqadj.c struct nf_conn *ct, ct 123 net/netfilter/nf_conntrack_seqadj.c struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); ct 165 net/netfilter/nf_conntrack_seqadj.c struct nf_conn *ct, enum ip_conntrack_info ctinfo, ct 172 net/netfilter/nf_conntrack_seqadj.c struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); ct 183 net/netfilter/nf_conntrack_seqadj.c spin_lock_bh(&ct->lock); ct 212 net/netfilter/nf_conntrack_seqadj.c res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo); ct 214 net/netfilter/nf_conntrack_seqadj.c spin_unlock_bh(&ct->lock); ct 220 net/netfilter/nf_conntrack_seqadj.c s32 nf_ct_seq_offset(const struct nf_conn *ct, ct 224 net/netfilter/nf_conntrack_seqadj.c struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); ct 66 net/netfilter/nf_conntrack_sip.c static int string_len(const struct nf_conn *ct, const char *dptr, ct 78 net/netfilter/nf_conntrack_sip.c static int digits_len(const struct nf_conn *ct, const char *dptr, ct 110 net/netfilter/nf_conntrack_sip.c static int callid_len(const struct nf_conn *ct, const char *dptr, ct 129 net/netfilter/nf_conntrack_sip.c static int media_len(const struct nf_conn *ct, const char *dptr, ct 132 net/netfilter/nf_conntrack_sip.c int len = string_len(ct, dptr, limit, shift); ct 140 net/netfilter/nf_conntrack_sip.c return len + digits_len(ct, dptr, limit, shift); ct 143 net/netfilter/nf_conntrack_sip.c static int sip_parse_addr(const struct nf_conn *ct, const char *cp, ct 150 net/netfilter/nf_conntrack_sip.c if (!ct) ct 154 net/netfilter/nf_conntrack_sip.c switch (nf_ct_l3num(ct)) { ct 185 net/netfilter/nf_conntrack_sip.c static int epaddr_len(const struct nf_conn *ct, const char *dptr, ct 191 net/netfilter/nf_conntrack_sip.c if (!sip_parse_addr(ct, dptr, &dptr, &addr, limit, true)) { ct 199 net/netfilter/nf_conntrack_sip.c dptr += digits_len(ct, dptr, limit, shift); ct 205 net/netfilter/nf_conntrack_sip.c static int skp_epaddr_len(const struct nf_conn *ct, const char *dptr, ct 228 net/netfilter/nf_conntrack_sip.c return epaddr_len(ct, dptr, limit, shift); ct 237 net/netfilter/nf_conntrack_sip.c int ct_sip_parse_request(const struct nf_conn *ct, ct 248 net/netfilter/nf_conntrack_sip.c mlen = string_len(ct, dptr, limit, NULL); ct 264 net/netfilter/nf_conntrack_sip.c if (!skp_epaddr_len(ct, dptr, limit, &shift)) ct 268 net/netfilter/nf_conntrack_sip.c if (!sip_parse_addr(ct, dptr, &end, addr, limit, true)) ct 367 net/netfilter/nf_conntrack_sip.c int ct_sip_get_header(const struct nf_conn *ct, const char *dptr, ct 424 net/netfilter/nf_conntrack_sip.c *matchlen = hdr->match_len(ct, dptr, limit, &shift); ct 435 net/netfilter/nf_conntrack_sip.c static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr, ct 456 net/netfilter/nf_conntrack_sip.c *matchlen = hdr->match_len(ct, dptr, 
limit, &shift); ct 465 net/netfilter/nf_conntrack_sip.c static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, ct 474 net/netfilter/nf_conntrack_sip.c ret = ct_sip_next_header(ct, dptr, dataoff, datalen, ct 486 net/netfilter/nf_conntrack_sip.c ret = ct_sip_get_header(ct, dptr, dataoff, datalen, ct 505 net/netfilter/nf_conntrack_sip.c int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, ct 515 net/netfilter/nf_conntrack_sip.c ret = ct_sip_walk_headers(ct, dptr, dataoff ? *dataoff : 0, datalen, ct 521 net/netfilter/nf_conntrack_sip.c if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true)) ct 538 net/netfilter/nf_conntrack_sip.c static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr, ct 566 net/netfilter/nf_conntrack_sip.c int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, ct 584 net/netfilter/nf_conntrack_sip.c if (!sip_parse_addr(ct, start, &end, addr, limit, delim)) ct 593 net/netfilter/nf_conntrack_sip.c int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, ct 623 net/netfilter/nf_conntrack_sip.c static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, ct 629 net/netfilter/nf_conntrack_sip.c if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=", ct 638 net/netfilter/nf_conntrack_sip.c if (*proto != nf_ct_protonum(ct)) ct 641 net/netfilter/nf_conntrack_sip.c *proto = nf_ct_protonum(ct); ct 646 net/netfilter/nf_conntrack_sip.c static int sdp_parse_addr(const struct nf_conn *ct, const char *cp, ct 654 net/netfilter/nf_conntrack_sip.c switch (nf_ct_l3num(ct)) { ct 673 net/netfilter/nf_conntrack_sip.c static int sdp_addr_len(const struct nf_conn *ct, const char *dptr, ct 679 net/netfilter/nf_conntrack_sip.c if (!sdp_parse_addr(ct, dptr, &dptr, &addr, limit)) { ct 727 net/netfilter/nf_conntrack_sip.c int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, ct 737 net/netfilter/nf_conntrack_sip.c hdrs = nf_ct_l3num(ct) == NFPROTO_IPV4 ? 
ct_sdp_hdrs_v4 : ct_sdp_hdrs_v6; ct 771 net/netfilter/nf_conntrack_sip.c *matchlen = hdr->match_len(ct, dptr, limit, &shift); ct 781 net/netfilter/nf_conntrack_sip.c static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr, ct 790 net/netfilter/nf_conntrack_sip.c ret = ct_sip_get_sdp_header(ct, dptr, dataoff, datalen, type, term, ct 795 net/netfilter/nf_conntrack_sip.c if (!sdp_parse_addr(ct, dptr + *matchoff, NULL, addr, ct 801 net/netfilter/nf_conntrack_sip.c static int refresh_signalling_expectation(struct nf_conn *ct, ct 806 net/netfilter/nf_conntrack_sip.c struct nf_conn_help *help = nfct_help(ct); ct 828 net/netfilter/nf_conntrack_sip.c static void flush_expectations(struct nf_conn *ct, bool media) ct 830 net/netfilter/nf_conntrack_sip.c struct nf_conn_help *help = nfct_help(ct); ct 855 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 856 net/netfilter/nf_conntrack_sip.c struct net *net = nf_ct_net(ct); ct 867 net/netfilter/nf_conntrack_sip.c if (!nf_inet_addr_cmp(daddr, &ct->tuplehash[dir].tuple.src.u3)) ct 869 net/netfilter/nf_conntrack_sip.c saddr = &ct->tuplehash[!dir].tuple.src.u3; ct 878 net/netfilter/nf_conntrack_sip.c switch (nf_ct_l3num(ct)) { ct 918 net/netfilter/nf_conntrack_sip.c tuple.src.l3num = nf_ct_l3num(ct); ct 924 net/netfilter/nf_conntrack_sip.c exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); ct 926 net/netfilter/nf_conntrack_sip.c if (!exp || exp->master == ct || ct 927 net/netfilter/nf_conntrack_sip.c nfct_help(exp->master)->helper != nfct_help(ct)->helper || ct 934 net/netfilter/nf_conntrack_sip.c ct->status & IPS_NAT_MASK) { ct 959 net/netfilter/nf_conntrack_sip.c rtp_exp = nf_ct_expect_alloc(ct); ct 962 net/netfilter/nf_conntrack_sip.c nf_ct_expect_init(rtp_exp, class, nf_ct_l3num(ct), saddr, daddr, ct 965 net/netfilter/nf_conntrack_sip.c rtcp_exp = nf_ct_expect_alloc(ct); ct 968 net/netfilter/nf_conntrack_sip.c nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr, ct 972 net/netfilter/nf_conntrack_sip.c if (hooks && ct->status & IPS_NAT_MASK && !direct_rtp) ct 1032 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1047 net/netfilter/nf_conntrack_sip.c if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen, ct 1057 net/netfilter/nf_conntrack_sip.c if (ct_sip_parse_sdp_addr(ct, *dptr, sdpoff, *datalen, ct 1064 net/netfilter/nf_conntrack_sip.c if (ct_sip_get_sdp_header(ct, *dptr, mediaoff, *datalen, ct 1083 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "wrong port %u", port); ct 1089 net/netfilter/nf_conntrack_sip.c if (ct_sip_parse_sdp_addr(ct, *dptr, mediaoff, *datalen, ct 1097 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot parse SDP message"); ct 1106 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, ct 1112 net/netfilter/nf_conntrack_sip.c if (maddr_len && hooks && ct->status & IPS_NAT_MASK) { ct 1119 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot mangle SDP"); ct 1128 net/netfilter/nf_conntrack_sip.c if (hooks && ct->status & IPS_NAT_MASK) ct 1141 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1142 net/netfilter/nf_conntrack_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 1148 net/netfilter/nf_conntrack_sip.c flush_expectations(ct, true); ct 1158 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1159 net/netfilter/nf_conntrack_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 1165 
net/netfilter/nf_conntrack_sip.c flush_expectations(ct, true); ct 1175 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1176 net/netfilter/nf_conntrack_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 1182 net/netfilter/nf_conntrack_sip.c flush_expectations(ct, true); ct 1192 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1193 net/netfilter/nf_conntrack_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 1196 net/netfilter/nf_conntrack_sip.c flush_expectations(ct, true); ct 1209 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1211 net/netfilter/nf_conntrack_sip.c flush_expectations(ct, true); ct 1225 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1226 net/netfilter/nf_conntrack_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 1238 net/netfilter/nf_conntrack_sip.c if (ct->status & IPS_EXPECTED) ct 1249 net/netfilter/nf_conntrack_sip.c if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES, ct 1253 net/netfilter/nf_conntrack_sip.c ret = ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, ct 1257 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot parse contact"); ct 1263 net/netfilter/nf_conntrack_sip.c if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr)) ct 1266 net/netfilter/nf_conntrack_sip.c if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen, ct 1270 net/netfilter/nf_conntrack_sip.c if (ct_sip_parse_numerical_param(ct, *dptr, ct 1273 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot parse expires"); ct 1282 net/netfilter/nf_conntrack_sip.c exp = nf_ct_expect_alloc(ct); ct 1284 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot alloc expectation"); ct 1290 net/netfilter/nf_conntrack_sip.c saddr = &ct->tuplehash[!dir].tuple.src.u3; ct 1292 net/netfilter/nf_conntrack_sip.c nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct), ct 1295 net/netfilter/nf_conntrack_sip.c exp->helper = nfct_help(ct)->helper; ct 1299 net/netfilter/nf_conntrack_sip.c if (hooks && ct->status & IPS_NAT_MASK) ct 1304 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot add expectation"); ct 1323 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1324 net/netfilter/nf_conntrack_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 1349 net/netfilter/nf_conntrack_sip.c if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_EXPIRES, ct 1356 net/netfilter/nf_conntrack_sip.c ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen, ct 1361 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot parse contact"); ct 1367 net/netfilter/nf_conntrack_sip.c if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr)) ct 1370 net/netfilter/nf_conntrack_sip.c if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, ct 1374 net/netfilter/nf_conntrack_sip.c ret = ct_sip_parse_numerical_param(ct, *dptr, ct 1379 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot parse expires"); ct 1384 net/netfilter/nf_conntrack_sip.c if (refresh_signalling_expectation(ct, &addr, proto, port, ct 1390 net/netfilter/nf_conntrack_sip.c flush_expectations(ct, false); ct 1408 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1416 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot get code"); ct 1420 net/netfilter/nf_conntrack_sip.c if 
(ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, ct 1422 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot parse cseq"); ct 1427 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot get cseq"); ct 1452 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 1453 net/netfilter/nf_conntrack_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 1466 net/netfilter/nf_conntrack_sip.c if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, ct 1469 net/netfilter/nf_conntrack_sip.c port != ct->tuplehash[dir].tuple.src.u.udp.port && ct 1470 net/netfilter/nf_conntrack_sip.c nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3)) ct 1486 net/netfilter/nf_conntrack_sip.c if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, ct 1488 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot parse cseq"); ct 1493 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot get cseq"); ct 1503 net/netfilter/nf_conntrack_sip.c static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct, ct 1515 net/netfilter/nf_conntrack_sip.c if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { ct 1519 net/netfilter/nf_conntrack_sip.c nf_ct_helper_log(skb, ct, "cannot NAT SIP message"); ct 1528 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 1551 net/netfilter/nf_conntrack_sip.c nf_ct_refresh(ct, skb, sip_timeout * HZ); ct 1562 net/netfilter/nf_conntrack_sip.c if (ct_sip_get_header(ct, dptr, 0, datalen, ct 1587 net/netfilter/nf_conntrack_sip.c ret = process_sip_msg(skb, ct, protoff, dataoff, ct 1600 net/netfilter/nf_conntrack_sip.c if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { ct 1612 net/netfilter/nf_conntrack_sip.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 1622 net/netfilter/nf_conntrack_sip.c nf_ct_refresh(ct, skb, sip_timeout * HZ); ct 1632 net/netfilter/nf_conntrack_sip.c return process_sip_msg(skb, ct, protoff, dataoff, &dptr, &datalen); ct 30 net/netfilter/nf_conntrack_snmp.c struct nf_conn *ct, ct 35 net/netfilter/nf_conntrack_snmp.c struct nf_conn *ct, ct 40 net/netfilter/nf_conntrack_snmp.c nf_conntrack_broadcast_help(skb, ct, ctinfo, timeout); ct 43 net/netfilter/nf_conntrack_snmp.c if (nf_nat_snmp && ct->status & IPS_NAT_MASK) ct 44 net/netfilter/nf_conntrack_snmp.c return nf_nat_snmp(skb, protoff, ct, ctinfo); ct 173 net/netfilter/nf_conntrack_standalone.c static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) ct 179 net/netfilter/nf_conntrack_standalone.c ret = security_secid_to_secctx(ct->secmark, &secctx, &len); ct 188 net/netfilter/nf_conntrack_standalone.c static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) ct 194 net/netfilter/nf_conntrack_standalone.c static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct, ct 197 net/netfilter/nf_conntrack_standalone.c const struct nf_conntrack_zone *zone = nf_ct_zone(ct); ct 216 net/netfilter/nf_conntrack_standalone.c static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct, ct 223 net/netfilter/nf_conntrack_standalone.c static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) ct 229 net/netfilter/nf_conntrack_standalone.c tstamp = nf_conn_tstamp_find(ct); ct 244 net/netfilter/nf_conntrack_standalone.c ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) ct 275 net/netfilter/nf_conntrack_standalone.c seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) ct 280 
net/netfilter/nf_conntrack_standalone.c acct = nf_conn_acct_find(ct); ct 296 net/netfilter/nf_conntrack_standalone.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); ct 301 net/netfilter/nf_conntrack_standalone.c WARN_ON(!ct); ct 302 net/netfilter/nf_conntrack_standalone.c if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) ct 305 net/netfilter/nf_conntrack_standalone.c if (nf_ct_should_gc(ct)) { ct 306 net/netfilter/nf_conntrack_standalone.c nf_ct_kill(ct); ct 314 net/netfilter/nf_conntrack_standalone.c if (!net_eq(nf_ct_net(ct), net)) ct 317 net/netfilter/nf_conntrack_standalone.c l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); ct 321 net/netfilter/nf_conntrack_standalone.c l3proto_name(nf_ct_l3num(ct)), nf_ct_l3num(ct), ct 322 net/netfilter/nf_conntrack_standalone.c l4proto_name(l4proto->l4proto), nf_ct_protonum(ct)); ct 324 net/netfilter/nf_conntrack_standalone.c if (!test_bit(IPS_OFFLOAD_BIT, &ct->status)) ct 325 net/netfilter/nf_conntrack_standalone.c seq_printf(s, "%ld ", nf_ct_expires(ct) / HZ); ct 328 net/netfilter/nf_conntrack_standalone.c l4proto->print_conntrack(s, ct); ct 330 net/netfilter/nf_conntrack_standalone.c print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, ct 333 net/netfilter/nf_conntrack_standalone.c ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG); ct 338 net/netfilter/nf_conntrack_standalone.c if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) ct 341 net/netfilter/nf_conntrack_standalone.c if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) ct 344 net/netfilter/nf_conntrack_standalone.c print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, l4proto); ct 346 net/netfilter/nf_conntrack_standalone.c ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL); ct 348 net/netfilter/nf_conntrack_standalone.c if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) ct 351 net/netfilter/nf_conntrack_standalone.c if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) ct 353 net/netfilter/nf_conntrack_standalone.c else if (test_bit(IPS_ASSURED_BIT, &ct->status)) ct 360 net/netfilter/nf_conntrack_standalone.c seq_printf(s, "mark=%u ", ct->mark); ct 363 net/netfilter/nf_conntrack_standalone.c ct_show_secctx(s, ct); ct 364 net/netfilter/nf_conntrack_standalone.c ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR); ct 365 net/netfilter/nf_conntrack_standalone.c ct_show_delta_time(s, ct); ct 367 net/netfilter/nf_conntrack_standalone.c seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)); ct 374 net/netfilter/nf_conntrack_standalone.c nf_ct_put(ct); ct 397 net/netfilter/nf_conntrack_standalone.c return per_cpu_ptr(net->ct.stat, cpu); ct 412 net/netfilter/nf_conntrack_standalone.c return per_cpu_ptr(net->ct.stat, cpu); ct 425 net/netfilter/nf_conntrack_standalone.c unsigned int nr_conntracks = atomic_read(&net->ct.count); ct 612 net/netfilter/nf_conntrack_standalone.c .data = &init_net.ct.count, ct 626 net/netfilter/nf_conntrack_standalone.c .data = &init_net.ct.sysctl_checksum, ct 635 net/netfilter/nf_conntrack_standalone.c .data = &init_net.ct.sysctl_log_invalid, ct 651 net/netfilter/nf_conntrack_standalone.c .data = &init_net.ct.sysctl_acct, ct 660 net/netfilter/nf_conntrack_standalone.c .data = &init_net.ct.sysctl_auto_assign_helper, ct 670 net/netfilter/nf_conntrack_standalone.c .data = &init_net.ct.sysctl_events, ct 681 net/netfilter/nf_conntrack_standalone.c .data = &init_net.ct.sysctl_tstamp, ct 1035 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; ct 1036 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; ct 1037 
net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; ct 1038 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct; ct 1039 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper; ct 1041 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; ct 1044 net/netfilter/nf_conntrack_standalone.c table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp; ct 1073 net/netfilter/nf_conntrack_standalone.c net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table); ct 1074 net/netfilter/nf_conntrack_standalone.c if (!net->ct.sysctl_header) ct 1088 net/netfilter/nf_conntrack_standalone.c table = net->ct.sysctl_header->ctl_table_arg; ct 1089 net/netfilter/nf_conntrack_standalone.c unregister_net_sysctl_table(net->ct.sysctl_header); ct 1116 net/netfilter/nf_conntrack_standalone.c net->ct.sysctl_checksum = 1; ct 42 net/netfilter/nf_conntrack_tftp.c struct nf_conn *ct, ct 61 net/netfilter/nf_conntrack_tftp.c nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); ct 62 net/netfilter/nf_conntrack_tftp.c nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); ct 64 net/netfilter/nf_conntrack_tftp.c exp = nf_ct_expect_alloc(ct); ct 66 net/netfilter/nf_conntrack_tftp.c nf_ct_helper_log(skb, ct, "cannot alloc expectation"); ct 69 net/netfilter/nf_conntrack_tftp.c tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 71 net/netfilter/nf_conntrack_tftp.c nf_ct_l3num(ct), ct 79 net/netfilter/nf_conntrack_tftp.c if (nf_nat_tftp && ct->status & IPS_NAT_MASK) ct 82 net/netfilter/nf_conntrack_tftp.c nf_ct_helper_log(skb, ct, "cannot add expectation"); ct 32 net/netfilter/nf_conntrack_timeout.c static int untimeout(struct nf_conn *ct, void *timeout) ct 34 net/netfilter/nf_conntrack_timeout.c struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct); ct 58 net/netfilter/nf_conntrack_timeout.c int nf_ct_set_timeout(struct net *net, struct nf_conn *ct, ct 100 net/netfilter/nf_conntrack_timeout.c timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC); ct 119 net/netfilter/nf_conntrack_timeout.c void nf_ct_destroy_timeout(struct nf_conn *ct) ct 128 net/netfilter/nf_conntrack_timeout.c timeout_ext = nf_ct_timeout_find(ct); ct 30 net/netfilter/nf_conntrack_timestamp.c net->ct.sysctl_tstamp = nf_ct_tstamp; ct 19 net/netfilter/nf_flow_table_core.c struct nf_conn *ct; ct 27 net/netfilter/nf_flow_table_core.c flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct, ct 32 net/netfilter/nf_flow_table_core.c struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; ct 61 net/netfilter/nf_flow_table_core.c flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route) ct 66 net/netfilter/nf_flow_table_core.c if (unlikely(nf_ct_is_dying(ct) || ct 67 net/netfilter/nf_flow_table_core.c !atomic_inc_not_zero(&ct->ct_general.use))) ct 82 net/netfilter/nf_flow_table_core.c entry->ct = ct; ct 84 net/netfilter/nf_flow_table_core.c flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL); ct 85 net/netfilter/nf_flow_table_core.c flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY); ct 87 net/netfilter/nf_flow_table_core.c if (ct->status & IPS_SRC_NAT) ct 89 net/netfilter/nf_flow_table_core.c if (ct->status & IPS_DST_NAT) ct 99 net/netfilter/nf_flow_table_core.c nf_ct_put(ct); ct 120 net/netfilter/nf_flow_table_core.c static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) ct 123 
net/netfilter/nf_flow_table_core.c int l4num = nf_ct_protonum(ct); ct 137 net/netfilter/nf_flow_table_core.c if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout) ct 138 net/netfilter/nf_flow_table_core.c ct->timeout = nfct_time_stamp + timeout; ct 141 net/netfilter/nf_flow_table_core.c static void flow_offload_fixup_ct_state(struct nf_conn *ct) ct 143 net/netfilter/nf_flow_table_core.c if (nf_ct_protonum(ct) == IPPROTO_TCP) ct 144 net/netfilter/nf_flow_table_core.c flow_offload_fixup_tcp(&ct->proto.tcp); ct 147 net/netfilter/nf_flow_table_core.c static void flow_offload_fixup_ct(struct nf_conn *ct) ct 149 net/netfilter/nf_flow_table_core.c flow_offload_fixup_ct_state(ct); ct 150 net/netfilter/nf_flow_table_core.c flow_offload_fixup_ct_timeout(ct); ct 161 net/netfilter/nf_flow_table_core.c nf_ct_delete(e->ct, 0, 0); ct 162 net/netfilter/nf_flow_table_core.c nf_ct_put(e->ct); ct 245 net/netfilter/nf_flow_table_core.c clear_bit(IPS_OFFLOAD_BIT, &e->ct->status); ct 248 net/netfilter/nf_flow_table_core.c flow_offload_fixup_ct(e->ct); ct 250 net/netfilter/nf_flow_table_core.c flow_offload_fixup_ct_timeout(e->ct); ct 262 net/netfilter/nf_flow_table_core.c flow_offload_fixup_ct_state(e->ct); ct 286 net/netfilter/nf_flow_table_core.c if (unlikely(nf_ct_is_dying(e->ct))) ct 333 net/netfilter/nf_flow_table_core.c if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) || ct 496 net/netfilter/nf_flow_table_core.c if (net_eq(nf_ct_net(e->ct), dev_net(dev)) && ct 56 net/netfilter/nf_nat_core.c const struct nf_conn *ct, ct 61 net/netfilter/nf_nat_core.c const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple; ct 64 net/netfilter/nf_nat_core.c if (ct->status & statusbit) { ct 76 net/netfilter/nf_nat_core.c if (ct->status & statusbit) { ct 88 net/netfilter/nf_nat_core.c const struct nf_conn *ct, ct 94 net/netfilter/nf_nat_core.c const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple; ct 97 net/netfilter/nf_nat_core.c if (ct->status & statusbit) { ct 109 net/netfilter/nf_nat_core.c if (ct->status & statusbit) { ct 123 net/netfilter/nf_nat_core.c const struct nf_conn *ct; ct 129 net/netfilter/nf_nat_core.c ct = nf_ct_get(skb, &ctinfo); ct 130 net/netfilter/nf_nat_core.c if (ct == NULL) ct 133 net/netfilter/nf_nat_core.c family = nf_ct_l3num(ct); ct 142 net/netfilter/nf_nat_core.c nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl); ct 145 net/netfilter/nf_nat_core.c nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl); ct 283 net/netfilter/nf_nat_core.c same_src(const struct nf_conn *ct, ct 288 net/netfilter/nf_nat_core.c t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 303 net/netfilter/nf_nat_core.c const struct nf_conn *ct; ct 305 net/netfilter/nf_nat_core.c hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) { ct 306 net/netfilter/nf_nat_core.c if (same_src(ct, tuple) && ct 307 net/netfilter/nf_nat_core.c net_eq(net, nf_ct_net(ct)) && ct 308 net/netfilter/nf_nat_core.c nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) { ct 311 net/netfilter/nf_nat_core.c &ct->tuplehash[IP_CT_DIR_REPLY].tuple); ct 331 net/netfilter/nf_nat_core.c const struct nf_conn *ct, ct 355 net/netfilter/nf_nat_core.c if (nf_ct_l3num(ct) == NFPROTO_IPV4) ct 403 net/netfilter/nf_nat_core.c const struct nf_conn *ct) ct 428 net/netfilter/nf_nat_core.c if (!ct->master) ct 506 net/netfilter/nf_nat_core.c if (!nf_nat_used_tuple(tuple, ct)) ct 527 net/netfilter/nf_nat_core.c struct nf_conn *ct, ct 531 net/netfilter/nf_nat_core.c struct net *net = nf_ct_net(ct); ct 533 net/netfilter/nf_nat_core.c zone = 
nf_ct_zone(ct); ct 547 net/netfilter/nf_nat_core.c if (!nf_nat_used_tuple(orig_tuple, ct)) { ct 554 net/netfilter/nf_nat_core.c if (!nf_nat_used_tuple(tuple, ct)) ct 561 net/netfilter/nf_nat_core.c find_best_ips_proto(zone, tuple, range, ct, maniptype); ct 575 net/netfilter/nf_nat_core.c !nf_nat_used_tuple(tuple, ct))) ct 577 net/netfilter/nf_nat_core.c } else if (!nf_nat_used_tuple(tuple, ct)) { ct 583 net/netfilter/nf_nat_core.c nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct); ct 586 net/netfilter/nf_nat_core.c struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct) ct 588 net/netfilter/nf_nat_core.c struct nf_conn_nat *nat = nfct_nat(ct); ct 592 net/netfilter/nf_nat_core.c if (!nf_ct_is_confirmed(ct)) ct 593 net/netfilter/nf_nat_core.c nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC); ct 600 net/netfilter/nf_nat_core.c nf_nat_setup_info(struct nf_conn *ct, ct 604 net/netfilter/nf_nat_core.c struct net *net = nf_ct_net(ct); ct 608 net/netfilter/nf_nat_core.c if (nf_ct_is_confirmed(ct)) ct 614 net/netfilter/nf_nat_core.c if (WARN_ON(nf_nat_initialized(ct, maniptype))) ct 623 net/netfilter/nf_nat_core.c &ct->tuplehash[IP_CT_DIR_REPLY].tuple); ct 625 net/netfilter/nf_nat_core.c get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype); ct 632 net/netfilter/nf_nat_core.c nf_conntrack_alter_reply(ct, &reply); ct 636 net/netfilter/nf_nat_core.c ct->status |= IPS_SRC_NAT; ct 638 net/netfilter/nf_nat_core.c ct->status |= IPS_DST_NAT; ct 640 net/netfilter/nf_nat_core.c if (nfct_help(ct) && !nfct_seqadj(ct)) ct 641 net/netfilter/nf_nat_core.c if (!nfct_seqadj_ext_add(ct)) ct 650 net/netfilter/nf_nat_core.c &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); ct 653 net/netfilter/nf_nat_core.c hlist_add_head_rcu(&ct->nat_bysource, ct 660 net/netfilter/nf_nat_core.c ct->status |= IPS_DST_NAT_DONE; ct 662 net/netfilter/nf_nat_core.c ct->status |= IPS_SRC_NAT_DONE; ct 669 net/netfilter/nf_nat_core.c __nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip) ct 677 net/netfilter/nf_nat_core.c ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 : ct 678 net/netfilter/nf_nat_core.c ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3); ct 684 net/netfilter/nf_nat_core.c return nf_nat_setup_info(ct, &range, manip); ct 688 net/netfilter/nf_nat_core.c nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) ct 690 net/netfilter/nf_nat_core.c return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum)); ct 695 net/netfilter/nf_nat_core.c unsigned int nf_nat_packet(struct nf_conn *ct, ct 715 net/netfilter/nf_nat_core.c if (ct->status & statusbit) ct 716 net/netfilter/nf_nat_core.c verdict = nf_nat_manip_pkt(skb, ct, mtype, dir); ct 726 net/netfilter/nf_nat_core.c struct nf_conn *ct; ct 732 net/netfilter/nf_nat_core.c ct = nf_ct_get(skb, &ctinfo); ct 738 net/netfilter/nf_nat_core.c if (!ct) ct 741 net/netfilter/nf_nat_core.c nat = nfct_nat(ct); ct 751 net/netfilter/nf_nat_core.c if (!nf_nat_initialized(ct, maniptype)) { ct 765 net/netfilter/nf_nat_core.c if (nf_nat_initialized(ct, maniptype)) ct 769 net/netfilter/nf_nat_core.c ret = nf_nat_alloc_null_binding(ct, state->hook); ct 775 net/netfilter/nf_nat_core.c ct, ct->status); ct 789 net/netfilter/nf_nat_core.c return nf_nat_packet(ct, ctinfo, state->hook, skb); ct 792 net/netfilter/nf_nat_core.c nf_ct_kill_acct(ct, ctinfo, skb); ct 814 net/netfilter/nf_nat_core.c static void __nf_nat_cleanup_conntrack(struct nf_conn *ct) ct 818 net/netfilter/nf_nat_core.c h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); ct 820 
net/netfilter/nf_nat_core.c hlist_del_rcu(&ct->nat_bysource); ct 824 net/netfilter/nf_nat_core.c static int nf_nat_proto_clean(struct nf_conn *ct, void *data) ct 826 net/netfilter/nf_nat_core.c if (nf_nat_proto_remove(ct, data)) ct 835 net/netfilter/nf_nat_core.c if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status)) ct 836 net/netfilter/nf_nat_core.c __nf_nat_cleanup_conntrack(ct); ct 845 net/netfilter/nf_nat_core.c static void nf_nat_cleanup_conntrack(struct nf_conn *ct) ct 847 net/netfilter/nf_nat_core.c if (ct->status & IPS_SRC_NAT_DONE) ct 848 net/netfilter/nf_nat_core.c __nf_nat_cleanup_conntrack(ct); ct 884 net/netfilter/nf_nat_core.c const struct nf_conn *ct, ct 942 net/netfilter/nf_nat_core.c const struct nf_conn *ct, struct nf_nat_range2 *range) ct 954 net/netfilter/nf_nat_core.c switch (nf_ct_l3num(ct)) { ct 972 net/netfilter/nf_nat_core.c return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); ct 977 net/netfilter/nf_nat_core.c nfnetlink_parse_nat_setup(struct nf_conn *ct, ct 987 net/netfilter/nf_nat_core.c if (WARN_ON_ONCE(nf_nat_initialized(ct, manip))) ct 992 net/netfilter/nf_nat_core.c return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0; ct 994 net/netfilter/nf_nat_core.c err = nfnetlink_parse_nat(attr, ct, &range); ct 998 net/netfilter/nf_nat_core.c return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0; ct 1002 net/netfilter/nf_nat_core.c nfnetlink_parse_nat_setup(struct nf_conn *ct, ct 33 net/netfilter/nf_nat_ftp.c static int nf_nat_ftp_fmt_cmd(struct nf_conn *ct, enum nf_ct_ftp_type type, ct 48 net/netfilter/nf_nat_ftp.c if (nf_ct_l3num(ct) == NFPROTO_IPV4) ct 74 net/netfilter/nf_nat_ftp.c struct nf_conn *ct = exp->master; ct 81 net/netfilter/nf_nat_ftp.c newaddr = ct->tuplehash[!dir].tuple.dst.u3; ct 104 net/netfilter/nf_nat_ftp.c nf_ct_helper_log(skb, ct, "all ports in use"); ct 108 net/netfilter/nf_nat_ftp.c buflen = nf_nat_ftp_fmt_cmd(ct, type, buffer, sizeof(buffer), ct 115 net/netfilter/nf_nat_ftp.c if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, ct 122 net/netfilter/nf_nat_ftp.c nf_ct_helper_log(skb, ct, "cannot mangle packet"); ct 87 net/netfilter/nf_nat_helper.c struct nf_conn *ct, ct 114 net/netfilter/nf_nat_helper.c nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_TCP, ct 118 net/netfilter/nf_nat_helper.c nf_ct_seqadj_set(ct, ctinfo, tcph->seq, ct 137 net/netfilter/nf_nat_helper.c struct nf_conn *ct, ct 170 net/netfilter/nf_nat_helper.c nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_UDP, ct 179 net/netfilter/nf_nat_helper.c void nf_nat_follow_master(struct nf_conn *ct, ct 185 net/netfilter/nf_nat_helper.c BUG_ON(ct->status & IPS_NAT_DONE_MASK); ct 190 net/netfilter/nf_nat_helper.c = ct->master->tuplehash[!exp->dir].tuple.dst.u3; ct 191 net/netfilter/nf_nat_helper.c nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); ct 197 net/netfilter/nf_nat_helper.c = ct->master->tuplehash[!exp->dir].tuple.src.u3; ct 198 net/netfilter/nf_nat_helper.c nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); ct 40 net/netfilter/nf_nat_irc.c struct nf_conn *ct = exp->master; ct 45 net/netfilter/nf_nat_irc.c newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; ct 66 net/netfilter/nf_nat_irc.c nf_ct_helper_log(skb, ct, "all ports in use"); ct 88 net/netfilter/nf_nat_irc.c if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, ct 90 net/netfilter/nf_nat_irc.c nf_ct_helper_log(skb, ct, "cannot mangle packet"); ct 20 net/netfilter/nf_nat_masquerade.c struct nf_conn *ct; ct 29 net/netfilter/nf_nat_masquerade.c ct = 
nf_ct_get(skb, &ctinfo); ct 31 net/netfilter/nf_nat_masquerade.c WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ct 37 net/netfilter/nf_nat_masquerade.c if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) ct 48 net/netfilter/nf_nat_masquerade.c nat = nf_ct_nat_ext_add(ct); ct 62 net/netfilter/nf_nat_masquerade.c return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); ct 95 net/netfilter/nf_nat_masquerade.c static int inet_cmp(struct nf_conn *ct, void *ptr) ct 101 net/netfilter/nf_nat_masquerade.c if (!device_cmp(ct, (void *)(long)dev->ifindex)) ct 104 net/netfilter/nf_nat_masquerade.c tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 165 net/netfilter/nf_nat_masquerade.c struct nf_conn *ct; ct 168 net/netfilter/nf_nat_masquerade.c ct = nf_ct_get(skb, &ctinfo); ct 169 net/netfilter/nf_nat_masquerade.c WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ct 172 net/netfilter/nf_nat_masquerade.c if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out, ct 176 net/netfilter/nf_nat_masquerade.c nat = nf_ct_nat_ext_add(ct); ct 186 net/netfilter/nf_nat_masquerade.c return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); ct 197 net/netfilter/nf_nat_masquerade.c static int inet6_cmp(struct nf_conn *ct, void *work) ct 202 net/netfilter/nf_nat_masquerade.c if (!device_cmp(ct, (void *)(long)w->ifindex)) ct 205 net/netfilter/nf_nat_masquerade.c tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 419 net/netfilter/nf_nat_proto.c unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct, ct 426 net/netfilter/nf_nat_proto.c nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); ct 560 net/netfilter/nf_nat_proto.c struct nf_conn *ct, ct 583 net/netfilter/nf_nat_proto.c if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) ct 585 net/netfilter/nf_nat_proto.c if (ct->status & IPS_NAT_MASK) ct 598 net/netfilter/nf_nat_proto.c if (!(ct->status & statusbit)) ct 602 net/netfilter/nf_nat_proto.c &ct->tuplehash[!dir].tuple, !manip)) ct 615 net/netfilter/nf_nat_proto.c nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); ct 628 net/netfilter/nf_nat_proto.c struct nf_conn *ct; ct 631 net/netfilter/nf_nat_proto.c ct = nf_ct_get(skb, &ctinfo); ct 632 net/netfilter/nf_nat_proto.c if (!ct) ct 637 net/netfilter/nf_nat_proto.c if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, ct 667 net/netfilter/nf_nat_proto.c const struct nf_conn *ct; ct 681 net/netfilter/nf_nat_proto.c ct = nf_ct_get(skb, &ctinfo); ct 682 net/netfilter/nf_nat_proto.c if (ct) { ct 685 net/netfilter/nf_nat_proto.c if (ct->tuplehash[dir].tuple.src.u3.ip != ct 686 net/netfilter/nf_nat_proto.c ct->tuplehash[!dir].tuple.dst.u3.ip || ct 687 net/netfilter/nf_nat_proto.c (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && ct 688 net/netfilter/nf_nat_proto.c ct->tuplehash[dir].tuple.src.u.all != ct 689 net/netfilter/nf_nat_proto.c ct->tuplehash[!dir].tuple.dst.u.all)) { ct 703 net/netfilter/nf_nat_proto.c const struct nf_conn *ct; ct 712 net/netfilter/nf_nat_proto.c ct = nf_ct_get(skb, &ctinfo); ct 713 net/netfilter/nf_nat_proto.c if (ct) { ct 716 net/netfilter/nf_nat_proto.c if (ct->tuplehash[dir].tuple.dst.u3.ip != ct 717 net/netfilter/nf_nat_proto.c ct->tuplehash[!dir].tuple.src.u3.ip) { ct 724 net/netfilter/nf_nat_proto.c ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && ct 725 net/netfilter/nf_nat_proto.c ct->tuplehash[dir].tuple.dst.u.all != ct 726 net/netfilter/nf_nat_proto.c ct->tuplehash[!dir].tuple.src.u.all) { ct 782 net/netfilter/nf_nat_proto.c struct nf_conn *ct, ct 805 
net/netfilter/nf_nat_proto.c if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) ct 807 net/netfilter/nf_nat_proto.c if (ct->status & IPS_NAT_MASK) ct 820 net/netfilter/nf_nat_proto.c if (!(ct->status & statusbit)) ct 824 net/netfilter/nf_nat_proto.c &ct->tuplehash[!dir].tuple, !manip)) ct 839 net/netfilter/nf_nat_proto.c nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); ct 852 net/netfilter/nf_nat_proto.c struct nf_conn *ct; ct 858 net/netfilter/nf_nat_proto.c ct = nf_ct_get(skb, &ctinfo); ct 864 net/netfilter/nf_nat_proto.c if (!ct) ct 873 net/netfilter/nf_nat_proto.c if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo, ct 905 net/netfilter/nf_nat_proto.c const struct nf_conn *ct; ct 918 net/netfilter/nf_nat_proto.c ct = nf_ct_get(skb, &ctinfo); ct 919 net/netfilter/nf_nat_proto.c if (ct) { ct 922 net/netfilter/nf_nat_proto.c if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, ct 923 net/netfilter/nf_nat_proto.c &ct->tuplehash[!dir].tuple.dst.u3) || ct 924 net/netfilter/nf_nat_proto.c (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && ct 925 net/netfilter/nf_nat_proto.c ct->tuplehash[dir].tuple.src.u.all != ct 926 net/netfilter/nf_nat_proto.c ct->tuplehash[!dir].tuple.dst.u.all)) { ct 941 net/netfilter/nf_nat_proto.c const struct nf_conn *ct; ct 950 net/netfilter/nf_nat_proto.c ct = nf_ct_get(skb, &ctinfo); ct 951 net/netfilter/nf_nat_proto.c if (ct) { ct 954 net/netfilter/nf_nat_proto.c if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, ct 955 net/netfilter/nf_nat_proto.c &ct->tuplehash[!dir].tuple.src.u3)) { ct 962 net/netfilter/nf_nat_proto.c ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && ct 963 net/netfilter/nf_nat_proto.c ct->tuplehash[dir].tuple.dst.u.all != ct 964 net/netfilter/nf_nat_proto.c ct->tuplehash[!dir].tuple.src.u.all) { ct 32 net/netfilter/nf_nat_redirect.c struct nf_conn *ct; ct 40 net/netfilter/nf_nat_redirect.c ct = nf_ct_get(skb, &ctinfo); ct 41 net/netfilter/nf_nat_redirect.c WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED))); ct 74 net/netfilter/nf_nat_redirect.c return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); ct 87 net/netfilter/nf_nat_redirect.c struct nf_conn *ct; ct 89 net/netfilter/nf_nat_redirect.c ct = nf_ct_get(skb, &ctinfo); ct 118 net/netfilter/nf_nat_redirect.c return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); ct 41 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 45 net/netfilter/nf_nat_sip.c if (nf_ct_protonum(ct) == IPPROTO_TCP) { ct 50 net/netfilter/nf_nat_sip.c if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo, ct 58 net/netfilter/nf_nat_sip.c if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, ct 70 net/netfilter/nf_nat_sip.c static int sip_sprintf_addr(const struct nf_conn *ct, char *buffer, ct 73 net/netfilter/nf_nat_sip.c if (nf_ct_l3num(ct) == NFPROTO_IPV4) ct 83 net/netfilter/nf_nat_sip.c static int sip_sprintf_addr_port(const struct nf_conn *ct, char *buffer, ct 86 net/netfilter/nf_nat_sip.c if (nf_ct_l3num(ct) == NFPROTO_IPV4) ct 99 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 101 net/netfilter/nf_nat_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 107 net/netfilter/nf_nat_sip.c if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, addr) && ct 108 net/netfilter/nf_nat_sip.c ct->tuplehash[dir].tuple.src.u.udp.port == port) { ct 109 net/netfilter/nf_nat_sip.c newaddr = ct->tuplehash[!dir].tuple.dst.u3; ct 110 net/netfilter/nf_nat_sip.c newport = ct->tuplehash[!dir].tuple.dst.u.udp.port; ct 
111 net/netfilter/nf_nat_sip.c } else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) && ct 112 net/netfilter/nf_nat_sip.c ct->tuplehash[dir].tuple.dst.u.udp.port == port) { ct 113 net/netfilter/nf_nat_sip.c newaddr = ct->tuplehash[!dir].tuple.src.u3; ct 115 net/netfilter/nf_nat_sip.c ct->tuplehash[!dir].tuple.src.u.udp.port; ct 122 net/netfilter/nf_nat_sip.c buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, ntohs(newport)); ct 133 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 138 net/netfilter/nf_nat_sip.c if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL, ct 150 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 152 net/netfilter/nf_nat_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 161 net/netfilter/nf_nat_sip.c if (ct_sip_parse_request(ct, *dptr, *datalen, ct 166 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle SIP message"); ct 173 net/netfilter/nf_nat_sip.c if (nf_ct_protonum(ct) == IPPROTO_TCP) ct 179 net/netfilter/nf_nat_sip.c if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, ct 189 net/netfilter/nf_nat_sip.c &ct->tuplehash[dir].tuple.src.u3) || ct 190 net/netfilter/nf_nat_sip.c port != ct->tuplehash[dir].tuple.src.u.udp.port) ct 194 net/netfilter/nf_nat_sip.c &ct->tuplehash[dir].tuple.dst.u3) || ct 195 net/netfilter/nf_nat_sip.c port != ct->tuplehash[dir].tuple.dst.u.udp.port) ct 202 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle Via header"); ct 210 net/netfilter/nf_nat_sip.c if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, ct 213 net/netfilter/nf_nat_sip.c nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3) && ct 214 net/netfilter/nf_nat_sip.c !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3)) { ct 215 net/netfilter/nf_nat_sip.c buflen = sip_sprintf_addr(ct, buffer, ct 216 net/netfilter/nf_nat_sip.c &ct->tuplehash[!dir].tuple.dst.u3, ct 220 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle maddr"); ct 227 net/netfilter/nf_nat_sip.c if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, ct 230 net/netfilter/nf_nat_sip.c nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.dst.u3) && ct 231 net/netfilter/nf_nat_sip.c !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.src.u3)) { ct 232 net/netfilter/nf_nat_sip.c buflen = sip_sprintf_addr(ct, buffer, ct 233 net/netfilter/nf_nat_sip.c &ct->tuplehash[!dir].tuple.src.u3, ct 237 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle received"); ct 244 net/netfilter/nf_nat_sip.c if (ct_sip_parse_numerical_param(ct, *dptr, matchend, *datalen, ct 247 net/netfilter/nf_nat_sip.c htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port && ct 248 net/netfilter/nf_nat_sip.c htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { ct 249 net/netfilter/nf_nat_sip.c __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; ct 253 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle rport"); ct 263 net/netfilter/nf_nat_sip.c while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen, ct 270 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle contact"); ct 277 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle SIP from/to"); ct 286 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle packet"); ct 293 net/netfilter/nf_nat_sip.c if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, protoff, ct 295 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle packet"); ct 307 
net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 310 net/netfilter/nf_nat_sip.c if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0) ct 314 net/netfilter/nf_nat_sip.c nf_ct_seqadj_set(ct, ctinfo, th->seq, off); ct 318 net/netfilter/nf_nat_sip.c static void nf_nat_sip_expected(struct nf_conn *ct, ct 321 net/netfilter/nf_nat_sip.c struct nf_conn_help *help = nfct_help(ct->master); ct 327 net/netfilter/nf_nat_sip.c BUG_ON(ct->status & IPS_NAT_DONE_MASK); ct 333 net/netfilter/nf_nat_sip.c nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); ct 341 net/netfilter/nf_nat_sip.c if (pair_exp->tuple.src.l3num == nf_ct_l3num(ct) && ct 342 net/netfilter/nf_nat_sip.c pair_exp->tuple.dst.protonum == ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum && ct 343 net/netfilter/nf_nat_sip.c nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3, &pair_exp->saved_addr) && ct 344 net/netfilter/nf_nat_sip.c ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all == pair_exp->saved_proto.all) { ct 360 net/netfilter/nf_nat_sip.c nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3, ct 361 net/netfilter/nf_nat_sip.c &ct->master->tuplehash[exp->dir].tuple.src.u3)) { ct 364 net/netfilter/nf_nat_sip.c = ct->master->tuplehash[!exp->dir].tuple.dst.u3; ct 370 net/netfilter/nf_nat_sip.c nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); ct 381 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 383 net/netfilter/nf_nat_sip.c struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ct 391 net/netfilter/nf_nat_sip.c if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, ct 392 net/netfilter/nf_nat_sip.c &ct->tuplehash[!dir].tuple.dst.u3)) ct 395 net/netfilter/nf_nat_sip.c newaddr = ct->tuplehash[!dir].tuple.dst.u3; ct 401 net/netfilter/nf_nat_sip.c ct->tuplehash[dir].tuple.src.u.udp.port; ct 403 net/netfilter/nf_nat_sip.c port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port); ct 427 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "all ports in use for SIP"); ct 433 net/netfilter/nf_nat_sip.c buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, port); ct 436 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle packet"); ct 452 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 458 net/netfilter/nf_nat_sip.c if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen, ct 465 net/netfilter/nf_nat_sip.c if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CONTENT_LENGTH, ct 483 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 486 net/netfilter/nf_nat_sip.c if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term, ct 502 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 506 net/netfilter/nf_nat_sip.c buflen = sip_sprintf_addr(ct, buffer, addr, false); ct 539 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 544 net/netfilter/nf_nat_sip.c buflen = sip_sprintf_addr(ct, buffer, addr, false); ct 581 net/netfilter/nf_nat_sip.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 586 net/netfilter/nf_nat_sip.c if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, ct 587 net/netfilter/nf_nat_sip.c &ct->tuplehash[!dir].tuple.dst.u3)) ct 590 net/netfilter/nf_nat_sip.c *rtp_addr = ct->tuplehash[!dir].tuple.dst.u3; ct 634 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "all ports in use for SDP media"); ct 642 net/netfilter/nf_nat_sip.c nf_ct_helper_log(skb, ct, "cannot mangle SDP message"); ct 27 net/netfilter/nf_nat_tftp.c const struct nf_conn *ct = 
exp->master; ct 30 net/netfilter/nf_nat_tftp.c = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; ct 181 net/netfilter/nf_synproxy_core.c struct tcphdr *th, struct nf_conn *ct, ct 334 net/netfilter/nf_synproxy_core.c struct nf_conn *ct; ct 337 net/netfilter/nf_synproxy_core.c ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL); ct 338 net/netfilter/nf_synproxy_core.c if (!ct) ct 341 net/netfilter/nf_synproxy_core.c if (!nfct_seqadj_ext_add(ct)) ct 343 net/netfilter/nf_synproxy_core.c if (!nfct_synproxy_ext_add(ct)) ct 346 net/netfilter/nf_synproxy_core.c __set_bit(IPS_CONFIRMED_BIT, &ct->status); ct 347 net/netfilter/nf_synproxy_core.c nf_conntrack_get(&ct->ct_general); ct 348 net/netfilter/nf_synproxy_core.c snet->tmpl = ct; ct 363 net/netfilter/nf_synproxy_core.c nf_ct_tmpl_free(ct); ct 663 net/netfilter/nf_synproxy_core.c struct nf_conn *ct; ct 670 net/netfilter/nf_synproxy_core.c ct = nf_ct_get(skb, &ctinfo); ct 671 net/netfilter/nf_synproxy_core.c if (!ct) ct 674 net/netfilter/nf_synproxy_core.c synproxy = nfct_synproxy(ct); ct 687 net/netfilter/nf_synproxy_core.c state = &ct->proto.tcp; ct 691 net/netfilter/nf_synproxy_core.c nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ct 704 net/netfilter/nf_synproxy_core.c nf_ct_seqadj_init(ct, ctinfo, 0); ct 733 net/netfilter/nf_synproxy_core.c nf_conntrack_event_cache(IPCT_SYNPROXY, ct); ct 744 net/netfilter/nf_synproxy_core.c nf_conntrack_event_cache(IPCT_SYNPROXY, ct); ct 754 net/netfilter/nf_synproxy_core.c nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); ct 755 net/netfilter/nf_synproxy_core.c nf_conntrack_event_cache(IPCT_SEQADJ, ct); ct 766 net/netfilter/nf_synproxy_core.c synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy); ct 1081 net/netfilter/nf_synproxy_core.c struct nf_conn *ct; ct 1090 net/netfilter/nf_synproxy_core.c ct = nf_ct_get(skb, &ctinfo); ct 1091 net/netfilter/nf_synproxy_core.c if (!ct) ct 1094 net/netfilter/nf_synproxy_core.c synproxy = nfct_synproxy(ct); ct 1111 net/netfilter/nf_synproxy_core.c state = &ct->proto.tcp; ct 1115 net/netfilter/nf_synproxy_core.c nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ct 1128 net/netfilter/nf_synproxy_core.c nf_ct_seqadj_init(ct, ctinfo, 0); ct 1157 net/netfilter/nf_synproxy_core.c nf_conntrack_event_cache(IPCT_SYNPROXY, ct); ct 1168 net/netfilter/nf_synproxy_core.c nf_conntrack_event_cache(IPCT_SYNPROXY, ct); ct 1178 net/netfilter/nf_synproxy_core.c nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); ct 1179 net/netfilter/nf_synproxy_core.c nf_conntrack_event_cache(IPCT_SEQADJ, ct); ct 1190 net/netfilter/nf_synproxy_core.c synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy); ct 42 net/netfilter/nfnetlink_cthelper.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 47 net/netfilter/nfnetlink_cthelper.c help = nfct_help(ct); ct 96 net/netfilter/nfnetlink_cthelper.c nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) ct 98 net/netfilter/nfnetlink_cthelper.c struct nf_conn_help *help = nfct_help(ct); ct 111 net/netfilter/nfnetlink_cthelper.c nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct) ct 113 net/netfilter/nfnetlink_cthelper.c const struct nf_conn_help *help = nfct_help(ct); ct 452 net/netfilter/nfnetlink_log.c struct nf_conn *ct, enum ip_conntrack_info ctinfo) ct 630 net/netfilter/nfnetlink_log.c if (ct && nfnl_ct->build(inst->skb, ct, ctinfo, ct 691 net/netfilter/nfnetlink_log.c struct nf_conn *ct = NULL; ct 740 net/netfilter/nfnetlink_log.c ct = nfnl_ct->get_ct(skb, &ctinfo); ct 741 
net/netfilter/nfnetlink_log.c if (ct != NULL) ct 742 net/netfilter/nfnetlink_log.c size += nfnl_ct->build_size(ct); ct 795 net/netfilter/nfnetlink_log.c nfnl_ct, ct, ctinfo); ct 390 net/netfilter/nfnetlink_queue.c struct nf_conn *ct = NULL; ct 449 net/netfilter/nfnetlink_queue.c ct = nfnl_ct->get_ct(entskb, &ctinfo); ct 450 net/netfilter/nfnetlink_queue.c if (ct != NULL) ct 451 net/netfilter/nfnetlink_queue.c size += nfnl_ct->build_size(ct); ct 599 net/netfilter/nfnetlink_queue.c if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0) ct 642 net/netfilter/nfnetlink_queue.c const struct nf_conn *ct = (void *)skb_nfct(entry->skb); ct 644 net/netfilter/nfnetlink_queue.c if (ct && ((ct->status & flags) == IPS_DYING)) ct 1115 net/netfilter/nfnetlink_queue.c struct nf_conn *ct; ct 1117 net/netfilter/nfnetlink_queue.c ct = nfnl_ct->get_ct(entry->skb, ctinfo); ct 1118 net/netfilter/nfnetlink_queue.c if (ct == NULL) ct 1121 net/netfilter/nfnetlink_queue.c if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0) ct 1125 net/netfilter/nfnetlink_queue.c nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct, ct 1128 net/netfilter/nfnetlink_queue.c return ct; ct 1181 net/netfilter/nfnetlink_queue.c struct nf_conn *ct = NULL; ct 1205 net/netfilter/nfnetlink_queue.c ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo); ct 1222 net/netfilter/nfnetlink_queue.c if (ct && diff) ct 1223 net/netfilter/nfnetlink_queue.c nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff); ct 31 net/netfilter/nft_connlimit.c const struct nf_conn *ct; ct 36 net/netfilter/nft_connlimit.c ct = nf_ct_get(pkt->skb, &ctinfo); ct 37 net/netfilter/nft_connlimit.c if (ct != NULL) { ct 38 net/netfilter/nft_connlimit.c tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 39 net/netfilter/nft_connlimit.c zone = nf_ct_zone(ct); ct 65 net/netfilter/nft_ct.c const struct nf_conn *ct; ct 71 net/netfilter/nft_ct.c ct = nf_ct_get(pkt->skb, &ctinfo); ct 75 net/netfilter/nft_ct.c if (ct) ct 87 net/netfilter/nft_ct.c if (ct == NULL) ct 95 net/netfilter/nft_ct.c *dest = ct->status; ct 99 net/netfilter/nft_ct.c *dest = ct->mark; ct 104 net/netfilter/nft_ct.c *dest = ct->secmark; ct 108 net/netfilter/nft_ct.c *dest = jiffies_to_msecs(nf_ct_expires(ct)); ct 111 net/netfilter/nft_ct.c if (ct->master == NULL) ct 113 net/netfilter/nft_ct.c help = nfct_help(ct->master); ct 123 net/netfilter/nft_ct.c struct nf_conn_labels *labels = nf_ct_labels_find(ct); ct 134 net/netfilter/nft_ct.c const struct nf_conn_acct *acct = nf_conn_acct_find(ct); ct 144 net/netfilter/nft_ct.c const struct nf_conn_acct *acct = nf_conn_acct_find(ct); ct 160 net/netfilter/nft_ct.c nft_reg_store8(dest, nf_ct_l3num(ct)); ct 163 net/netfilter/nft_ct.c nft_reg_store8(dest, nf_ct_protonum(ct)); ct 167 net/netfilter/nft_ct.c const struct nf_conntrack_zone *zone = nf_ct_zone(ct); ct 180 net/netfilter/nft_ct.c if (!nf_ct_is_confirmed(ct)) ct 182 net/netfilter/nft_ct.c *dest = nf_ct_get_id(ct); ct 188 net/netfilter/nft_ct.c tuple = &ct->tuplehash[priv->dir].tuple; ct 192 net/netfilter/nft_ct.c nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); ct 196 net/netfilter/nft_ct.c nf_ct_l3num(ct) == NFPROTO_IPV4 ? 
4 : 16); ct 205 net/netfilter/nft_ct.c if (nf_ct_l3num(ct) != NFPROTO_IPV4) ct 210 net/netfilter/nft_ct.c if (nf_ct_l3num(ct) != NFPROTO_IPV4) ct 215 net/netfilter/nft_ct.c if (nf_ct_l3num(ct) != NFPROTO_IPV6) ct 220 net/netfilter/nft_ct.c if (nf_ct_l3num(ct) != NFPROTO_IPV6) ct 242 net/netfilter/nft_ct.c struct nf_conn *ct; ct 244 net/netfilter/nft_ct.c ct = nf_ct_get(skb, &ctinfo); ct 245 net/netfilter/nft_ct.c if (ct) /* already tracked */ ct 261 net/netfilter/nft_ct.c ct = this_cpu_read(nft_ct_pcpu_template); ct 263 net/netfilter/nft_ct.c if (likely(atomic_read(&ct->ct_general.use) == 1)) { ct 264 net/netfilter/nft_ct.c nf_ct_zone_add(ct, &zone); ct 267 net/netfilter/nft_ct.c ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC); ct 268 net/netfilter/nft_ct.c if (!ct) { ct 274 net/netfilter/nft_ct.c atomic_inc(&ct->ct_general.use); ct 275 net/netfilter/nft_ct.c nf_ct_set(skb, ct, IP_CT_NEW); ct 289 net/netfilter/nft_ct.c struct nf_conn *ct; ct 291 net/netfilter/nft_ct.c ct = nf_ct_get(skb, &ctinfo); ct 292 net/netfilter/nft_ct.c if (ct == NULL || nf_ct_is_template(ct)) ct 298 net/netfilter/nft_ct.c if (ct->mark != value) { ct 299 net/netfilter/nft_ct.c ct->mark = value; ct 300 net/netfilter/nft_ct.c nf_conntrack_event_cache(IPCT_MARK, ct); ct 306 net/netfilter/nft_ct.c if (ct->secmark != value) { ct 307 net/netfilter/nft_ct.c ct->secmark = value; ct 308 net/netfilter/nft_ct.c nf_conntrack_event_cache(IPCT_SECMARK, ct); ct 314 net/netfilter/nft_ct.c nf_connlabels_replace(ct, ct 322 net/netfilter/nft_ct.c struct nf_conntrack_ecache *e = nf_ct_ecache_find(ct); ct 331 net/netfilter/nft_ct.c if (ctmask && !nf_ct_is_confirmed(ct)) ct 332 net/netfilter/nft_ct.c nf_ct_ecache_ext_add(ct, ctmask, 0, GFP_ATOMIC); ct 351 net/netfilter/nft_ct.c struct nf_conn *ct; ct 355 net/netfilter/nft_ct.c ct = per_cpu(nft_ct_pcpu_template, cpu); ct 356 net/netfilter/nft_ct.c if (!ct) ct 358 net/netfilter/nft_ct.c nf_ct_put(ct); ct 768 net/netfilter/nft_ct.c struct nf_conn *ct; ct 770 net/netfilter/nft_ct.c ct = nf_ct_get(pkt->skb, &ctinfo); ct 772 net/netfilter/nft_ct.c if (ct || ctinfo == IP_CT_UNTRACKED) ct 775 net/netfilter/nft_ct.c nf_ct_set(skb, ct, IP_CT_UNTRACKED); ct 831 net/netfilter/nft_ct.c struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); ct 838 net/netfilter/nft_ct.c if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct)) ct 841 net/netfilter/nft_ct.c timeout = nf_ct_timeout_find(ct); ct 843 net/netfilter/nft_ct.c timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC); ct 857 net/netfilter/nft_ct.c nf_ct_refresh(ct, pkt->skb, values[0]); ct 1067 net/netfilter/nft_ct.c struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); ct 1071 net/netfilter/nft_ct.c if (!ct || ct 1072 net/netfilter/nft_ct.c nf_ct_is_confirmed(ct) || ct 1073 net/netfilter/nft_ct.c nf_ct_is_template(ct) || ct 1074 net/netfilter/nft_ct.c priv->l4proto != nf_ct_protonum(ct)) ct 1077 net/netfilter/nft_ct.c switch (nf_ct_l3num(ct)) { ct 1092 net/netfilter/nft_ct.c if (test_bit(IPS_HELPER_BIT, &ct->status)) ct 1095 net/netfilter/nft_ct.c help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); ct 1098 net/netfilter/nft_ct.c set_bit(IPS_HELPER_BIT, &ct->status); ct 1220 net/netfilter/nft_ct.c struct nf_conn *ct; ct 1222 net/netfilter/nft_ct.c ct = nf_ct_get(pkt->skb, &ctinfo); ct 1223 net/netfilter/nft_ct.c if (!ct || ctinfo == IP_CT_UNTRACKED) { ct 1229 net/netfilter/nft_ct.c help = nfct_help(ct); ct 1231 net/netfilter/nft_ct.c help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); ct 1242 net/netfilter/nft_ct.c l3num = 
nf_ct_l3num(ct); ct 1244 net/netfilter/nft_ct.c exp = nf_ct_expect_alloc(ct); ct 1250 net/netfilter/nft_ct.c &ct->tuplehash[!dir].tuple.src.u3, ct 1251 net/netfilter/nft_ct.c &ct->tuplehash[!dir].tuple.dst.u3, ct 23 net/netfilter/nft_flow_offload.c const struct nf_conn *ct, ct 34 net/netfilter/nft_flow_offload.c fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip; ct 38 net/netfilter/nft_flow_offload.c fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6; ct 81 net/netfilter/nft_flow_offload.c struct nf_conn *ct; ct 87 net/netfilter/nft_flow_offload.c ct = nf_ct_get(pkt->skb, &ctinfo); ct 88 net/netfilter/nft_flow_offload.c if (!ct) ct 91 net/netfilter/nft_flow_offload.c switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) { ct 104 net/netfilter/nft_flow_offload.c if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) || ct 105 net/netfilter/nft_flow_offload.c ct->status & IPS_SEQ_ADJUST) ct 108 net/netfilter/nft_flow_offload.c if (!nf_ct_is_confirmed(ct)) ct 111 net/netfilter/nft_flow_offload.c if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status)) ct 115 net/netfilter/nft_flow_offload.c if (nft_flow_route(pkt, ct, &route, dir) < 0) ct 118 net/netfilter/nft_flow_offload.c flow = flow_offload_alloc(ct, &route); ct 123 net/netfilter/nft_flow_offload.c ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; ct 124 net/netfilter/nft_flow_offload.c ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; ct 139 net/netfilter/nft_flow_offload.c clear_bit(IPS_OFFLOAD_BIT, &ct->status); ct 39 net/netfilter/nft_nat.c struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo); ct 71 net/netfilter/nft_nat.c regs->verdict.code = nf_nat_setup_info(ct, &range, priv->type); ct 1116 net/netfilter/x_tables.c struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; ct 1118 net/netfilter/x_tables.c u_int16_t tsize = ct->u.user.target_size; ct 1122 net/netfilter/x_tables.c memcpy(t, ct, sizeof(*ct)); ct 1124 net/netfilter/x_tables.c target->compat_from_user(t->data, ct->data); ct 1126 net/netfilter/x_tables.c memcpy(t->data, ct->data, tsize - sizeof(*ct)); ct 1146 net/netfilter/x_tables.c struct compat_xt_entry_target __user *ct = *dstptr; ct 1150 net/netfilter/x_tables.c if (XT_OBJ_TO_USER(ct, t, target, tsize)) ct 1154 net/netfilter/x_tables.c if (target->compat_to_user((void __user *)ct->data, t->data)) ct 1157 net/netfilter/x_tables.c if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct))) ct 35 net/netfilter/xt_CONNSECMARK.c struct nf_conn *ct; ct 38 net/netfilter/xt_CONNSECMARK.c ct = nf_ct_get(skb, &ctinfo); ct 39 net/netfilter/xt_CONNSECMARK.c if (ct && !ct->secmark) { ct 40 net/netfilter/xt_CONNSECMARK.c ct->secmark = skb->secmark; ct 41 net/netfilter/xt_CONNSECMARK.c nf_conntrack_event_cache(IPCT_SECMARK, ct); ct 53 net/netfilter/xt_CONNSECMARK.c const struct nf_conn *ct; ct 56 net/netfilter/xt_CONNSECMARK.c ct = nf_ct_get(skb, &ctinfo); ct 57 net/netfilter/xt_CONNSECMARK.c if (ct && ct->secmark) ct 58 net/netfilter/xt_CONNSECMARK.c skb->secmark = ct->secmark; ct 20 net/netfilter/xt_CT.c static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct) ct 26 net/netfilter/xt_CT.c if (ct) { ct 27 net/netfilter/xt_CT.c atomic_inc(&ct->ct_general.use); ct 28 net/netfilter/xt_CT.c nf_ct_set(skb, ct, IP_CT_NEW); ct 30 net/netfilter/xt_CT.c nf_ct_set(skb, ct, IP_CT_UNTRACKED); ct 40 net/netfilter/xt_CT.c struct nf_conn *ct = info->ct; ct 42 net/netfilter/xt_CT.c return xt_ct_target(skb, ct); ct 49 net/netfilter/xt_CT.c struct nf_conn *ct = info->ct; ct 51 net/netfilter/xt_CT.c return 
xt_ct_target(skb, ct); ct 73 net/netfilter/xt_CT.c xt_ct_set_helper(struct nf_conn *ct, const char *helper_name, ct 93 net/netfilter/xt_CT.c help = nf_ct_helper_ext_add(ct, GFP_KERNEL); ct 104 net/netfilter/xt_CT.c xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, ct 118 net/netfilter/xt_CT.c return nf_ct_set_timeout(par->net, ct, par->family, l4proto->l4proto, ct 144 net/netfilter/xt_CT.c struct nf_conn *ct; ct 148 net/netfilter/xt_CT.c ct = NULL; ct 169 net/netfilter/xt_CT.c ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL); ct 170 net/netfilter/xt_CT.c if (!ct) { ct 177 net/netfilter/xt_CT.c !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events, ct 189 net/netfilter/xt_CT.c ret = xt_ct_set_helper(ct, info->helper, par); ct 200 net/netfilter/xt_CT.c ret = xt_ct_set_timeout(ct, par, info->timeout); ct 204 net/netfilter/xt_CT.c __set_bit(IPS_CONFIRMED_BIT, &ct->status); ct 205 net/netfilter/xt_CT.c nf_conntrack_get(&ct->ct_general); ct 207 net/netfilter/xt_CT.c info->ct = ct; ct 211 net/netfilter/xt_CT.c help = nfct_help(ct); ct 215 net/netfilter/xt_CT.c nf_ct_tmpl_free(ct); ct 242 net/netfilter/xt_CT.c info->ct = info_v1.ct; ct 270 net/netfilter/xt_CT.c struct nf_conn *ct = info->ct; ct 273 net/netfilter/xt_CT.c if (ct) { ct 274 net/netfilter/xt_CT.c help = nfct_help(ct); ct 280 net/netfilter/xt_CT.c nf_ct_destroy_timeout(ct); ct 281 net/netfilter/xt_CT.c nf_ct_put(info->ct); ct 293 net/netfilter/xt_CT.c .ct = info->ct, ct 310 net/netfilter/xt_CT.c .usersize = offsetof(struct xt_ct_target_info, ct), ct 322 net/netfilter/xt_CT.c .usersize = offsetof(struct xt_ct_target_info, ct), ct 334 net/netfilter/xt_CT.c .usersize = offsetof(struct xt_ct_target_info, ct), ct 82 net/netfilter/xt_HMARK.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 86 net/netfilter/xt_HMARK.c if (ct == NULL) ct 89 net/netfilter/xt_HMARK.c otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 90 net/netfilter/xt_HMARK.c rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 100 net/netfilter/xt_HMARK.c t->proto = nf_ct_protonum(ct); ct 23 net/netfilter/xt_NETMAP.c struct nf_conn *ct; ct 28 net/netfilter/xt_NETMAP.c ct = nf_ct_get(skb, &ctinfo); ct 51 net/netfilter/xt_NETMAP.c return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(xt_hooknum(par))); ct 71 net/netfilter/xt_NETMAP.c struct nf_conn *ct; ct 81 net/netfilter/xt_NETMAP.c ct = nf_ct_get(skb, &ctinfo); ct 101 net/netfilter/xt_NETMAP.c return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(xt_hooknum(par))); ct 16 net/netfilter/xt_cluster.c static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct) ct 18 net/netfilter/xt_cluster.c return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; ct 21 net/netfilter/xt_cluster.c static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct) ct 23 net/netfilter/xt_cluster.c return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6; ct 39 net/netfilter/xt_cluster.c xt_cluster_hash(const struct nf_conn *ct, ct 44 net/netfilter/xt_cluster.c switch(nf_ct_l3num(ct)) { ct 46 net/netfilter/xt_cluster.c hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info); ct 49 net/netfilter/xt_cluster.c hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info); ct 83 net/netfilter/xt_cluster.c const struct nf_conn *ct; ct 109 net/netfilter/xt_cluster.c ct = nf_ct_get(skb, &ctinfo); ct 110 net/netfilter/xt_cluster.c if (ct == NULL) ct 113 net/netfilter/xt_cluster.c if (ct->master) ct 114 net/netfilter/xt_cluster.c hash = xt_cluster_hash(ct->master, info); ct 116 
net/netfilter/xt_cluster.c hash = xt_cluster_hash(ct, info); ct 24 net/netfilter/xt_connbytes.c const struct nf_conn *ct; ct 32 net/netfilter/xt_connbytes.c ct = nf_ct_get(skb, &ctinfo); ct 33 net/netfilter/xt_connbytes.c if (!ct) ct 36 net/netfilter/xt_connbytes.c acct = nf_conn_acct_find(ct); ct 25 net/netfilter/xt_connlabel.c struct nf_conn *ct; ct 28 net/netfilter/xt_connlabel.c ct = nf_ct_get(skb, &ctinfo); ct 29 net/netfilter/xt_connlabel.c if (ct == NULL) ct 32 net/netfilter/xt_connlabel.c labels = nf_ct_labels_find(ct); ct 41 net/netfilter/xt_connlabel.c nf_conntrack_event_cache(IPCT_LABEL, ct); ct 38 net/netfilter/xt_connlimit.c const struct nf_conn *ct; ct 42 net/netfilter/xt_connlimit.c ct = nf_ct_get(skb, &ctinfo); ct 43 net/netfilter/xt_connlimit.c if (ct != NULL) { ct 44 net/netfilter/xt_connlimit.c tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 45 net/netfilter/xt_connlimit.c zone = nf_ct_zone(ct); ct 31 net/netfilter/xt_connmark.c struct nf_conn *ct; ct 34 net/netfilter/xt_connmark.c ct = nf_ct_get(skb, &ctinfo); ct 35 net/netfilter/xt_connmark.c if (ct == NULL) ct 40 net/netfilter/xt_connmark.c newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; ct 46 net/netfilter/xt_connmark.c if (ct->mark != newmark) { ct 47 net/netfilter/xt_connmark.c ct->mark = newmark; ct 48 net/netfilter/xt_connmark.c nf_conntrack_event_cache(IPCT_MARK, ct); ct 58 net/netfilter/xt_connmark.c newmark = (ct->mark & ~info->ctmask) ^ ct 60 net/netfilter/xt_connmark.c if (ct->mark != newmark) { ct 61 net/netfilter/xt_connmark.c ct->mark = newmark; ct 62 net/netfilter/xt_connmark.c nf_conntrack_event_cache(IPCT_MARK, ct); ct 66 net/netfilter/xt_connmark.c new_targetmark = (ct->mark & info->ctmask); ct 123 net/netfilter/xt_connmark.c const struct nf_conn *ct; ct 125 net/netfilter/xt_connmark.c ct = nf_ct_get(skb, &ctinfo); ct 126 net/netfilter/xt_connmark.c if (ct == NULL) ct 129 net/netfilter/xt_connmark.c return ((ct->mark & info->mask) == info->mark) ^ info->invert; ct 40 net/netfilter/xt_conntrack.c conntrack_mt_origsrc(const struct nf_conn *ct, ct 44 net/netfilter/xt_conntrack.c return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3, ct 49 net/netfilter/xt_conntrack.c conntrack_mt_origdst(const struct nf_conn *ct, ct 53 net/netfilter/xt_conntrack.c return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3, ct 58 net/netfilter/xt_conntrack.c conntrack_mt_replsrc(const struct nf_conn *ct, ct 62 net/netfilter/xt_conntrack.c return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3, ct 67 net/netfilter/xt_conntrack.c conntrack_mt_repldst(const struct nf_conn *ct, ct 71 net/netfilter/xt_conntrack.c return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3, ct 77 net/netfilter/xt_conntrack.c const struct nf_conn *ct) ct 81 net/netfilter/xt_conntrack.c tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 83 net/netfilter/xt_conntrack.c (nf_ct_protonum(ct) == info->l4proto) ^ ct 98 net/netfilter/xt_conntrack.c tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 121 net/netfilter/xt_conntrack.c const struct nf_conn *ct) ct 125 net/netfilter/xt_conntrack.c tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 127 net/netfilter/xt_conntrack.c (nf_ct_protonum(ct) == info->l4proto) ^ ct 144 net/netfilter/xt_conntrack.c tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ct 167 net/netfilter/xt_conntrack.c const struct nf_conn *ct; ct 170 net/netfilter/xt_conntrack.c ct = nf_ct_get(skb, &ctinfo); ct 172 net/netfilter/xt_conntrack.c if (ct) ct 180 
net/netfilter/xt_conntrack.c if (ct != NULL) { ct 181 net/netfilter/xt_conntrack.c if (test_bit(IPS_SRC_NAT_BIT, &ct->status)) ct 183 net/netfilter/xt_conntrack.c if (test_bit(IPS_DST_NAT_BIT, &ct->status)) ct 191 net/netfilter/xt_conntrack.c if (ct == NULL) ct 199 net/netfilter/xt_conntrack.c if (conntrack_mt_origsrc(ct, info, xt_family(par)) ^ ct 204 net/netfilter/xt_conntrack.c if (conntrack_mt_origdst(ct, info, xt_family(par)) ^ ct 209 net/netfilter/xt_conntrack.c if (conntrack_mt_replsrc(ct, info, xt_family(par)) ^ ct 214 net/netfilter/xt_conntrack.c if (conntrack_mt_repldst(ct, info, xt_family(par)) ^ ct 219 net/netfilter/xt_conntrack.c if (!ct_proto_port_check(info, ct)) ct 222 net/netfilter/xt_conntrack.c if (!ct_proto_port_check_v3(par->matchinfo, ct)) ct 227 net/netfilter/xt_conntrack.c (!!(status_mask & ct->status) ^ ct 232 net/netfilter/xt_conntrack.c unsigned long expires = nf_ct_expires(ct) / HZ; ct 27 net/netfilter/xt_helper.c const struct nf_conn *ct; ct 33 net/netfilter/xt_helper.c ct = nf_ct_get(skb, &ctinfo); ct 34 net/netfilter/xt_helper.c if (!ct || !ct->master) ct 37 net/netfilter/xt_helper.c master_help = nfct_help(ct->master); ct 118 net/netfilter/xt_ipvs.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 120 net/netfilter/xt_ipvs.c if (ct == NULL) { ct 57 net/netfilter/xt_nat.c struct nf_conn *ct; ct 59 net/netfilter/xt_nat.c ct = nf_ct_get(skb, &ctinfo); ct 60 net/netfilter/xt_nat.c WARN_ON(!(ct != NULL && ct 65 net/netfilter/xt_nat.c return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); ct 74 net/netfilter/xt_nat.c struct nf_conn *ct; ct 76 net/netfilter/xt_nat.c ct = nf_ct_get(skb, &ctinfo); ct 77 net/netfilter/xt_nat.c WARN_ON(!(ct != NULL && ct 81 net/netfilter/xt_nat.c return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); ct 90 net/netfilter/xt_nat.c struct nf_conn *ct; ct 92 net/netfilter/xt_nat.c ct = nf_ct_get(skb, &ctinfo); ct 93 net/netfilter/xt_nat.c WARN_ON(!(ct != NULL && ct 100 net/netfilter/xt_nat.c return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); ct 109 net/netfilter/xt_nat.c struct nf_conn *ct; ct 111 net/netfilter/xt_nat.c ct = nf_ct_get(skb, &ctinfo); ct 112 net/netfilter/xt_nat.c WARN_ON(!(ct != NULL && ct 118 net/netfilter/xt_nat.c return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); ct 126 net/netfilter/xt_nat.c struct nf_conn *ct; ct 128 net/netfilter/xt_nat.c ct = nf_ct_get(skb, &ctinfo); ct 129 net/netfilter/xt_nat.c WARN_ON(!(ct != NULL && ct 133 net/netfilter/xt_nat.c return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC); ct 141 net/netfilter/xt_nat.c struct nf_conn *ct; ct 143 net/netfilter/xt_nat.c ct = nf_ct_get(skb, &ctinfo); ct 144 net/netfilter/xt_nat.c WARN_ON(!(ct != NULL && ct 147 net/netfilter/xt_nat.c return nf_nat_setup_info(ct, range, NF_NAT_MANIP_DST); ct 26 net/netfilter/xt_state.c struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ct 28 net/netfilter/xt_state.c if (ct) ct 20 net/netfilter/xt_u32.c const struct xt_u32_test *ct; ct 35 net/netfilter/xt_u32.c ct = &data->tests[testind]; ct 37 net/netfilter/xt_u32.c pos = ct->location[0].number; ct 45 net/netfilter/xt_u32.c nnums = ct->nnums; ct 49 net/netfilter/xt_u32.c u_int32_t number = ct->location[i].number; ct 50 net/netfilter/xt_u32.c switch (ct->location[i].nextop) { ct 78 net/netfilter/xt_u32.c nvals = ct->nvalues; ct 80 net/netfilter/xt_u32.c if (ct->value[i].min <= val && val <= ct->value[i].max) ct 83 net/netfilter/xt_u32.c if (i >= ct->nvalues) ct 60 net/openvswitch/conntrack.c struct nf_conn *ct; ct 150 net/openvswitch/conntrack.c static u32 
ovs_ct_get_mark(const struct nf_conn *ct) ct 153 net/openvswitch/conntrack.c return ct ? ct->mark : 0; ct 164 net/openvswitch/conntrack.c static void ovs_ct_get_labels(const struct nf_conn *ct, ct 167 net/openvswitch/conntrack.c struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL; ct 181 net/openvswitch/conntrack.c key->ct.orig_tp.src = htons(orig->dst.u.icmp.type); ct 182 net/openvswitch/conntrack.c key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code); ct 184 net/openvswitch/conntrack.c key->ct.orig_tp.src = orig->src.u.all; ct 185 net/openvswitch/conntrack.c key->ct.orig_tp.dst = orig->dst.u.all; ct 191 net/openvswitch/conntrack.c const struct nf_conn *ct) ct 195 net/openvswitch/conntrack.c key->ct.mark = ovs_ct_get_mark(ct); ct 196 net/openvswitch/conntrack.c ovs_ct_get_labels(ct, &key->ct.labels); ct 198 net/openvswitch/conntrack.c if (ct) { ct 202 net/openvswitch/conntrack.c if (ct->master) ct 203 net/openvswitch/conntrack.c ct = ct->master; ct 204 net/openvswitch/conntrack.c orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ct 208 net/openvswitch/conntrack.c nf_ct_l3num(ct) == NFPROTO_IPV4) { ct 215 net/openvswitch/conntrack.c nf_ct_l3num(ct) == NFPROTO_IPV6) { ct 240 net/openvswitch/conntrack.c struct nf_conn *ct; ct 243 net/openvswitch/conntrack.c ct = nf_ct_get(skb, &ctinfo); ct 244 net/openvswitch/conntrack.c if (ct) { ct 247 net/openvswitch/conntrack.c if (!nf_ct_is_confirmed(ct)) ct 252 net/openvswitch/conntrack.c if (ct->master) ct 257 net/openvswitch/conntrack.c if (ct->status & IPS_SRC_NAT) ct 259 net/openvswitch/conntrack.c if (ct->status & IPS_DST_NAT) ct 262 net/openvswitch/conntrack.c zone = nf_ct_zone(ct); ct 268 net/openvswitch/conntrack.c __ovs_ct_update_key(key, state, zone, ct); ct 294 net/openvswitch/conntrack.c nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark)) ct 298 net/openvswitch/conntrack.c nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels), ct 299 net/openvswitch/conntrack.c &output->ct.labels)) ct 307 net/openvswitch/conntrack.c output->ct.orig_tp.src, ct 308 net/openvswitch/conntrack.c output->ct.orig_tp.dst, ct 318 net/openvswitch/conntrack.c output->ct.orig_tp.src, ct 319 net/openvswitch/conntrack.c output->ct.orig_tp.dst, ct 331 net/openvswitch/conntrack.c static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key, ct 337 net/openvswitch/conntrack.c new_mark = ct_mark | (ct->mark & ~(mask)); ct 338 net/openvswitch/conntrack.c if (ct->mark != new_mark) { ct 339 net/openvswitch/conntrack.c ct->mark = new_mark; ct 340 net/openvswitch/conntrack.c if (nf_ct_is_confirmed(ct)) ct 341 net/openvswitch/conntrack.c nf_conntrack_event_cache(IPCT_MARK, ct); ct 342 net/openvswitch/conntrack.c key->ct.mark = new_mark; ct 351 net/openvswitch/conntrack.c static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct) ct 355 net/openvswitch/conntrack.c cl = nf_ct_labels_find(ct); ct 357 net/openvswitch/conntrack.c nf_ct_labels_ext_add(ct); ct 358 net/openvswitch/conntrack.c cl = nf_ct_labels_find(ct); ct 368 net/openvswitch/conntrack.c static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key, ct 376 net/openvswitch/conntrack.c master_cl = ct->master ? 
nf_ct_labels_find(ct->master) : NULL; ct 381 net/openvswitch/conntrack.c cl = ovs_ct_get_conn_labels(ct); ct 402 net/openvswitch/conntrack.c nf_conntrack_event_cache(IPCT_LABEL, ct); ct 404 net/openvswitch/conntrack.c memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN); ct 409 net/openvswitch/conntrack.c static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key, ct 416 net/openvswitch/conntrack.c cl = ovs_ct_get_conn_labels(ct); ct 420 net/openvswitch/conntrack.c err = nf_connlabels_replace(ct, labels->ct_labels_32, ct 426 net/openvswitch/conntrack.c memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN); ct 438 net/openvswitch/conntrack.c struct nf_conn *ct; ct 441 net/openvswitch/conntrack.c ct = nf_ct_get(skb, &ctinfo); ct 442 net/openvswitch/conntrack.c if (!ct || ctinfo == IP_CT_RELATED_REPLY) ct 445 net/openvswitch/conntrack.c help = nfct_help(ct); ct 476 net/openvswitch/conntrack.c err = helper->help(skb, protoff, ct, ctinfo); ct 484 net/openvswitch/conntrack.c if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && ct 485 net/openvswitch/conntrack.c !nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) ct 570 net/openvswitch/conntrack.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); ct 572 net/openvswitch/conntrack.c nf_ct_delete(ct, 0, 0); ct 573 net/openvswitch/conntrack.c nf_conntrack_put(&ct->ct_general); ct 584 net/openvswitch/conntrack.c const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); ct 589 net/openvswitch/conntrack.c if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) ct 591 net/openvswitch/conntrack.c if (test_bit(IPS_EXPECTED_BIT, &ct->status)) ct 611 net/openvswitch/conntrack.c struct nf_conn *ct; ct 635 net/openvswitch/conntrack.c ct = nf_ct_tuplehash_to_ctrack(h); ct 642 net/openvswitch/conntrack.c h = &ct->tuplehash[!h->tuple.dst.dir]; ct 644 net/openvswitch/conntrack.c nf_ct_set(skb, ct, ovs_ct_get_info(h)); ct 645 net/openvswitch/conntrack.c return ct; ct 655 net/openvswitch/conntrack.c struct nf_conn *ct = NULL; ct 668 net/openvswitch/conntrack.c ct = ovs_ct_find_existing(net, &info->zone, info->family, skb, ct 673 net/openvswitch/conntrack.c return ct; ct 683 net/openvswitch/conntrack.c struct nf_conn *ct; ct 686 net/openvswitch/conntrack.c ct = nf_ct_get(skb, &ctinfo); ct 687 net/openvswitch/conntrack.c if (!ct) ct 688 net/openvswitch/conntrack.c ct = ovs_ct_executed(net, key, info, skb, &ct_executed); ct 690 net/openvswitch/conntrack.c if (ct) ct 695 net/openvswitch/conntrack.c if (!net_eq(net, read_pnet(&ct->ct_net))) ct 697 net/openvswitch/conntrack.c if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct))) ct 702 net/openvswitch/conntrack.c help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER); ct 709 net/openvswitch/conntrack.c timeout_ext = nf_ct_timeout_find(ct); ct 719 net/openvswitch/conntrack.c if (nf_ct_is_confirmed(ct)) ct 720 net/openvswitch/conntrack.c nf_ct_delete(ct, 0, 0); ct 722 net/openvswitch/conntrack.c nf_conntrack_put(&ct->ct_general); ct 735 net/openvswitch/conntrack.c static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, ct 757 net/openvswitch/conntrack.c if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, ct 770 net/openvswitch/conntrack.c if (!nf_nat_icmpv6_reply_translation(skb, ct, ct 784 net/openvswitch/conntrack.c if (!nf_nat_initialized(ct, maniptype)) { ct 790 net/openvswitch/conntrack.c ? 
nf_nat_setup_info(ct, range, maniptype) ct 791 net/openvswitch/conntrack.c : nf_nat_alloc_null_binding(ct, hooknum); ct 806 net/openvswitch/conntrack.c err = nf_nat_packet(ct, ctinfo, hooknum, skb); ct 868 net/openvswitch/conntrack.c struct sk_buff *skb, struct nf_conn *ct, ct 875 net/openvswitch/conntrack.c if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct)) ct 884 net/openvswitch/conntrack.c ct->status & IPS_NAT_MASK && ct 892 net/openvswitch/conntrack.c maniptype = ct->status & IPS_SRC_NAT ct 895 net/openvswitch/conntrack.c maniptype = ct->status & IPS_SRC_NAT ct 904 net/openvswitch/conntrack.c err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); ct 907 net/openvswitch/conntrack.c ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { ct 913 net/openvswitch/conntrack.c err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, ct 926 net/openvswitch/conntrack.c struct sk_buff *skb, struct nf_conn *ct, ct 950 net/openvswitch/conntrack.c struct nf_conn *ct; ct 958 net/openvswitch/conntrack.c struct nf_conn *tmpl = info->ct; ct 983 net/openvswitch/conntrack.c ct = nf_ct_get(skb, &ctinfo); ct 984 net/openvswitch/conntrack.c if (ct) { ct 996 net/openvswitch/conntrack.c (nf_ct_is_confirmed(ct) || info->commit) && ct 997 net/openvswitch/conntrack.c ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) { ct 1006 net/openvswitch/conntrack.c if (!nf_ct_is_confirmed(ct) && info->commit && ct 1007 net/openvswitch/conntrack.c info->helper && !nfct_help(ct)) { ct 1008 net/openvswitch/conntrack.c int err = __nf_ct_try_assign_helper(ct, info->ct, ct 1014 net/openvswitch/conntrack.c if (info->nat && !nfct_seqadj(ct)) { ct 1015 net/openvswitch/conntrack.c if (!nfct_seqadj_ext_add(ct)) ct 1025 net/openvswitch/conntrack.c if ((nf_ct_is_confirmed(ct) ? 
!cached : info->commit) && ct 1058 net/openvswitch/conntrack.c struct nf_conn *ct; ct 1065 net/openvswitch/conntrack.c ct = (struct nf_conn *)skb_nfct(skb); ct 1066 net/openvswitch/conntrack.c if (ct) ct 1067 net/openvswitch/conntrack.c nf_ct_deliver_cached_events(ct); ct 1173 net/openvswitch/conntrack.c struct nf_conn *ct; ct 1181 net/openvswitch/conntrack.c ct = nf_ct_get(skb, &ctinfo); ct 1182 net/openvswitch/conntrack.c if (!ct) ct 1187 net/openvswitch/conntrack.c if (!nf_ct_is_confirmed(ct)) { ct 1189 net/openvswitch/conntrack.c &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); ct 1208 net/openvswitch/conntrack.c struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct); ct 1219 net/openvswitch/conntrack.c err = ovs_ct_set_mark(ct, key, info->mark.value, ct 1224 net/openvswitch/conntrack.c if (!nf_ct_is_confirmed(ct)) { ct 1225 net/openvswitch/conntrack.c err = ovs_ct_init_labels(ct, key, &info->labels.value, ct 1231 net/openvswitch/conntrack.c err = ovs_ct_set_labels(ct, key, &info->labels.value, ct 1336 net/openvswitch/conntrack.c help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL); ct 1674 net/openvswitch/conntrack.c ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL); ct 1675 net/openvswitch/conntrack.c if (!ct_info.ct) { ct 1681 net/openvswitch/conntrack.c if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto, ct 1687 net/openvswitch/conntrack.c nf_ct_timeout_find(ct_info.ct)->timeout); ct 1702 net/openvswitch/conntrack.c __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status); ct 1703 net/openvswitch/conntrack.c nf_conntrack_get(&ct_info.ct->ct_general); ct 1841 net/openvswitch/conntrack.c if (ct_info->ct) { ct 1843 net/openvswitch/conntrack.c nf_ct_destroy_timeout(ct_info->ct); ct 1844 net/openvswitch/conntrack.c nf_ct_tmpl_free(ct_info->ct); ct 81 net/openvswitch/conntrack.h key->ct.mark = 0; ct 82 net/openvswitch/conntrack.h memset(&key->ct.labels, 0, sizeof(key->ct.labels)); ct 148 net/openvswitch/flow.h } ct; ct 1222 net/openvswitch/flow_netlink.c SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask); ct 1230 net/openvswitch/flow_netlink.c SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels, ct 1235 net/openvswitch/flow_netlink.c const struct ovs_key_ct_tuple_ipv4 *ct; ct 1237 net/openvswitch/flow_netlink.c ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]); ct 1239 net/openvswitch/flow_netlink.c SW_FLOW_KEY_PUT(match, ipv4.ct_orig.src, ct->ipv4_src, is_mask); ct 1240 net/openvswitch/flow_netlink.c SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask); ct 1241 net/openvswitch/flow_netlink.c SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask); ct 1242 net/openvswitch/flow_netlink.c SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask); ct 1243 net/openvswitch/flow_netlink.c SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv4_proto, is_mask); ct 1247 net/openvswitch/flow_netlink.c const struct ovs_key_ct_tuple_ipv6 *ct; ct 1249 net/openvswitch/flow_netlink.c ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]); ct 1251 net/openvswitch/flow_netlink.c SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.src, &ct->ipv6_src, ct 1254 net/openvswitch/flow_netlink.c SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.dst, &ct->ipv6_dst, ct 1257 net/openvswitch/flow_netlink.c SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask); ct 1258 net/openvswitch/flow_netlink.c SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask); ct 1259 net/openvswitch/flow_netlink.c SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv6_proto, is_mask); ct 1922 net/openvswitch/flow_netlink.c memset(&key->ct, 0, 
sizeof(key->ct)); ct 47 net/sched/act_ct.c struct nf_conn *ct; ct 49 net/sched/act_ct.c ct = nf_ct_get(skb, &ctinfo); ct 50 net/sched/act_ct.c if (!ct) ct 52 net/sched/act_ct.c if (!net_eq(net, read_pnet(&ct->ct_net))) ct 54 net/sched/act_ct.c if (nf_ct_zone(ct)->id != zone_id) ct 59 net/sched/act_ct.c if (nf_ct_is_confirmed(ct)) ct 60 net/sched/act_ct.c nf_ct_kill(ct); ct 62 net/sched/act_ct.c nf_conntrack_put(&ct->ct_general); ct 155 net/sched/act_ct.c struct nf_conn *ct; ct 160 net/sched/act_ct.c ct = nf_ct_get(skb, &ctinfo); ct 161 net/sched/act_ct.c if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED) ct 220 net/sched/act_ct.c static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, ct 238 net/sched/act_ct.c if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, ct 251 net/sched/act_ct.c if (!nf_nat_icmpv6_reply_translation(skb, ct, ct 265 net/sched/act_ct.c if (!nf_nat_initialized(ct, maniptype)) { ct 271 net/sched/act_ct.c ? nf_nat_setup_info(ct, range, maniptype) ct 272 net/sched/act_ct.c : nf_nat_alloc_null_binding(ct, hooknum); ct 287 net/sched/act_ct.c err = nf_nat_packet(ct, ctinfo, hooknum, skb); ct 293 net/sched/act_ct.c static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask) ct 301 net/sched/act_ct.c new_mark = mark | (ct->mark & ~(mask)); ct 302 net/sched/act_ct.c if (ct->mark != new_mark) { ct 303 net/sched/act_ct.c ct->mark = new_mark; ct 304 net/sched/act_ct.c if (nf_ct_is_confirmed(ct)) ct 305 net/sched/act_ct.c nf_conntrack_event_cache(IPCT_MARK, ct); ct 310 net/sched/act_ct.c static void tcf_ct_act_set_labels(struct nf_conn *ct, ct 320 net/sched/act_ct.c nf_connlabels_replace(ct, labels, labels_m, 4); ct 325 net/sched/act_ct.c struct nf_conn *ct, ct 339 net/sched/act_ct.c if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct)) ct 342 net/sched/act_ct.c if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) && ct 350 net/sched/act_ct.c maniptype = ct->status & IPS_SRC_NAT ct 353 net/sched/act_ct.c maniptype = ct->status & IPS_SRC_NAT ct 363 net/sched/act_ct.c err = ct_nat_execute(skb, ct, ctinfo, range, maniptype); ct 365 net/sched/act_ct.c ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { ct 371 net/sched/act_ct.c err = ct_nat_execute(skb, ct, ctinfo, range, maniptype); ct 390 net/sched/act_ct.c struct nf_conn *ct; ct 402 net/sched/act_ct.c ct = nf_ct_get(skb, &ctinfo); ct 403 net/sched/act_ct.c if (ct) { ct 404 net/sched/act_ct.c nf_conntrack_put(&ct->ct_general); ct 441 net/sched/act_ct.c ct = nf_ct_get(skb, &ctinfo); ct 456 net/sched/act_ct.c ct = nf_ct_get(skb, &ctinfo); ct 457 net/sched/act_ct.c if (!ct) ct 459 net/sched/act_ct.c nf_ct_deliver_cached_events(ct); ct 461 net/sched/act_ct.c err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit); ct 466 net/sched/act_ct.c tcf_ct_act_set_mark(ct, p->mark, p->mark_mask); ct 467 net/sched/act_ct.c tcf_ct_act_set_labels(ct, p->labels, p->labels_mask); ct 30 net/sched/act_ctinfo.c static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca, ct 36 net/sched/act_ctinfo.c newdscp = (((ct->mark & cp->dscpmask) >> cp->dscpmaskshift) << 2) & ct 71 net/sched/act_ctinfo.c static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca, ct 76 net/sched/act_ctinfo.c skb->mark = ct->mark & cp->cpmarkmask; ct 88 net/sched/act_ctinfo.c struct nf_conn *ct; ct 115 net/sched/act_ctinfo.c ct = nf_ct_get(skb, &ctinfo); ct 116 net/sched/act_ctinfo.c if (!ct) { /* look harder, usually ingress */ ct 127 net/sched/act_ctinfo.c ct = 
nf_ct_tuplehash_to_ctrack(thash); ct 131 net/sched/act_ctinfo.c if (!cp->dscpstatemask || (ct->mark & cp->dscpstatemask)) ct 132 net/sched/act_ctinfo.c tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto); ct 135 net/sched/act_ctinfo.c tcf_ctinfo_cpmark_set(ct, ca, cp, skb); ct 138 net/sched/act_ctinfo.c nf_ct_put(ct); ct 3539 net/sched/cls_api.c entry->ct.action = tcf_ct_action(act); ct 3540 net/sched/cls_api.c entry->ct.zone = tcf_ct_zone(act); ct 138 net/sched/cls_flow.c const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \ ct 139 net/sched/cls_flow.c if (ct == NULL) \ ct 141 net/sched/cls_flow.c ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \ ct 64 net/sched/cls_flower.c struct flow_dissector_key_ct ct; ct 1261 net/sched/cls_flower.c ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack); ct 1367 net/sched/cls_flower.c FLOW_DISSECTOR_KEY_CT, ct); ct 2421 net/sched/cls_flower.c if (fl_dump_key_ct(skb, &key->ct, &mask->ct)) ct 55 sound/soc/au1x/psc-i2s.c unsigned long ct; ct 60 sound/soc/au1x/psc-i2s.c ct = pscdata->cfg; ct 62 sound/soc/au1x/psc-i2s.c ct &= ~(PSC_I2SCFG_XM | PSC_I2SCFG_MLJ); /* left-justified */ ct 65 sound/soc/au1x/psc-i2s.c ct |= PSC_I2SCFG_XM; /* enable I2S mode */ ct 70 sound/soc/au1x/psc-i2s.c ct |= PSC_I2SCFG_MLJ; /* LSB (right-) justified */ ct 76 sound/soc/au1x/psc-i2s.c ct &= ~(PSC_I2SCFG_BI | PSC_I2SCFG_WI); /* IB-IF */ ct 79 sound/soc/au1x/psc-i2s.c ct |= PSC_I2SCFG_BI | PSC_I2SCFG_WI; ct 82 sound/soc/au1x/psc-i2s.c ct |= PSC_I2SCFG_BI; ct 85 sound/soc/au1x/psc-i2s.c ct |= PSC_I2SCFG_WI; ct 95 sound/soc/au1x/psc-i2s.c ct |= PSC_I2SCFG_MS; /* PSC I2S slave mode */ ct 98 sound/soc/au1x/psc-i2s.c ct &= ~PSC_I2SCFG_MS; /* PSC I2S Master mode */ ct 104 sound/soc/au1x/psc-i2s.c pscdata->cfg = ct; ct 971 sound/usb/pcm.c struct snd_interval *ct = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); ct 987 sound/usb/pcm.c if (fp->channels < ct->min || fp->channels > ct->max) { ct 988 sound/usb/pcm.c hwc_debug(" > check: no valid channels %d (%d/%d)\n", fp->channels, ct->min, ct->max); ct 92 tools/perf/util/stat.c ID(CYCLES_IN_TX_CP, cpu/cycles-ct/),