cs 138 arch/alpha/kernel/time.c qemu_cs_read(struct clocksource *cs) cs 264 arch/alpha/kernel/time.c static u64 read_rpcc(struct clocksource *cs) cs 31 arch/arc/include/asm/string.h extern int strcmp(const char *cs, const char *ct); cs 84 arch/arc/plat-eznps/include/plat/ctop.h u32 csa:22, dmsid:6, __reserved:3, cs:1; cs 94 arch/arc/plat-eznps/smp.c cpu_cfg.cs = 1; cs 34 arch/arm/boot/compressed/decompress.c extern int memcmp(const void *cs, const void *ct, size_t count); cs 78 arch/arm/boot/compressed/string.c int memcmp(const void *cs, const void *ct, size_t count) cs 80 arch/arm/boot/compressed/string.c const unsigned char *su1 = cs, *su2 = ct, *end = su1 + count; cs 91 arch/arm/boot/compressed/string.c int strcmp(const char *cs, const char *ct) cs 97 arch/arm/boot/compressed/string.c c1 = *cs++; cs 192 arch/arm/include/asm/assembler.h .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo cs 449 arch/arm/include/asm/assembler.h .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo cs 352 arch/arm/mach-davinci/board-da830-evm.c .cs = 3, cs 271 arch/arm/mach-davinci/board-da850-evm.c .cs = 3, cs 186 arch/arm/mach-davinci/board-dm365-evm.c .cs = 1, cs 191 arch/arm/mach-davinci/board-dm644x-evm.c .cs = 1, cs 133 arch/arm/mach-davinci/board-dm646x-evm.c .cs = 1, cs 476 arch/arm/mach-davinci/board-mityomapl138.c .cs = 1, cs 242 arch/arm/mach-davinci/board-omapl138-hawk.c .cs = 3, cs 263 arch/arm/mach-davinci/time.c static u64 read_cycles(struct clocksource *cs) cs 23 arch/arm/mach-footbridge/dc21285-timer.c static u64 cksrc_dc21285_read(struct clocksource *cs) cs 25 arch/arm/mach-footbridge/dc21285-timer.c return cs->mask - *CSR_TIMER2_VALUE; cs 28 arch/arm/mach-footbridge/dc21285-timer.c static int cksrc_dc21285_enable(struct clocksource *cs) cs 30 arch/arm/mach-footbridge/dc21285-timer.c *CSR_TIMER2_LOAD = cs->mask; cs 36 arch/arm/mach-footbridge/dc21285-timer.c static void cksrc_dc21285_disable(struct clocksource *cs) cs 143 arch/arm/mach-imx/mach-qong.c static void qong_nand_select_chip(struct nand_chip *chip, int cs) cs 145 arch/arm/mach-imx/mach-qong.c if (cs >= 0) cs 104 arch/arm/mach-imx/mx27.h #define MX27_WEIM_CSCRx_BASE_ADDR(cs) (MX27_WEIM_BASE_ADDR + (cs) * 0x10) cs 105 arch/arm/mach-imx/mx27.h #define MX27_WEIM_CSCRxU(cs) (MX27_WEIM_CSCRx_BASE_ADDR(cs)) cs 106 arch/arm/mach-imx/mx27.h #define MX27_WEIM_CSCRxL(cs) (MX27_WEIM_CSCRx_BASE_ADDR(cs) + 0x4) cs 107 arch/arm/mach-imx/mx27.h #define MX27_WEIM_CSCRxA(cs) (MX27_WEIM_CSCRx_BASE_ADDR(cs) + 0x8) cs 112 arch/arm/mach-imx/mx31.h #define MX31_WEIM_CSCRx_BASE_ADDR(cs) (MX31_WEIM_BASE_ADDR + (cs) * 0x10) cs 113 arch/arm/mach-imx/mx31.h #define MX31_WEIM_CSCRxU(cs) (MX31_WEIM_CSCRx_BASE_ADDR(cs)) cs 114 arch/arm/mach-imx/mx31.h #define MX31_WEIM_CSCRxL(cs) (MX31_WEIM_CSCRx_BASE_ADDR(cs) + 0x4) cs 115 arch/arm/mach-imx/mx31.h #define MX31_WEIM_CSCRxA(cs) (MX31_WEIM_CSCRx_BASE_ADDR(cs) + 0x8) cs 139 arch/arm/mach-mmp/time.c static u64 clksrc_read(struct clocksource *cs) cs 393 arch/arm/mach-omap2/clockdomain.c int clkdm_register_clkdms(struct clockdomain **cs) cs 400 arch/arm/mach-omap2/clockdomain.c if (!cs) cs 403 arch/arm/mach-omap2/clockdomain.c for (c = cs; *c; c++) cs 415 arch/arm/mach-omap2/timer.c static u64 clocksource_read_cycles(struct clocksource *cs) cs 23 arch/arm/mach-orion5x/common.h #define ORION_MBUS_DEVBUS_TARGET(cs) 0x01 cs 24 arch/arm/mach-orion5x/common.h #define ORION_MBUS_DEVBUS_ATTR(cs) (~(1 << cs)) cs 426 arch/arm/mach-orion5x/pci.c const struct mbus_dram_window *cs = dram->cs + i; cs 427 
arch/arm/mach-orion5x/pci.c u32 func = PCI_CONF_FUNC_BAR_CS(cs->cs_index); cs 434 arch/arm/mach-orion5x/pci.c reg = PCI_CONF_REG_BAR_LO_CS(cs->cs_index); cs 436 arch/arm/mach-orion5x/pci.c val = (cs->base & 0xfffff000) | (val & 0xfff); cs 442 arch/arm/mach-orion5x/pci.c reg = PCI_CONF_REG_BAR_HI_CS(cs->cs_index); cs 444 arch/arm/mach-orion5x/pci.c writel((cs->size - 1) & 0xfffff000, cs 445 arch/arm/mach-orion5x/pci.c PCI_BAR_SIZE_DDR_CS(cs->cs_index)); cs 446 arch/arm/mach-orion5x/pci.c writel(cs->base & 0xfffff000, cs 447 arch/arm/mach-orion5x/pci.c PCI_BAR_REMAP_DDR_CS(cs->cs_index)); cs 452 arch/arm/mach-orion5x/pci.c win_enable &= ~(1 << cs->cs_index); cs 29 arch/arm/mach-rpc/time.c static u64 ioc_timer_read(struct clocksource *cs) cs 155 arch/arm/plat-orion/pcie.c const struct mbus_dram_window *cs = dram->cs + i; cs 157 arch/arm/plat-orion/pcie.c writel(cs->base & 0xffff0000, base + PCIE_WIN04_BASE_OFF(i)); cs 159 arch/arm/plat-orion/pcie.c writel(((cs->size - 1) & 0xffff0000) | cs 160 arch/arm/plat-orion/pcie.c (cs->mbus_attr << 8) | cs 164 arch/arm/plat-orion/pcie.c size += cs->size; cs 176 arch/arm/plat-orion/pcie.c writel(dram->cs[0].base, base + PCIE_BAR_LO_OFF(1)); cs 26 arch/c6x/kernel/time.c static u64 tsc_read(struct clocksource *cs) cs 62 arch/hexagon/kernel/time.c static u64 timer_get_cycles(struct clocksource *cs) cs 25 arch/ia64/kernel/cyclone.c static u64 read_cyclone(struct clocksource *cs) cs 36 arch/ia64/kernel/time.c static u64 itc_get_cycles(struct clocksource *cs); cs 358 arch/ia64/kernel/time.c static u64 itc_get_cycles(struct clocksource *cs) cs 79 arch/m68k/68000/timers.c static u64 m68328_read_clk(struct clocksource *cs) cs 465 arch/m68k/amiga/config.c static u64 amiga_read_clk(struct clocksource *cs); cs 516 arch/m68k/amiga/config.c static u64 amiga_read_clk(struct clocksource *cs) cs 28 arch/m68k/atari/time.c static u64 atari_read_clk(struct clocksource *cs); cs 74 arch/m68k/atari/time.c static u64 atari_read_clk(struct clocksource *cs) cs 151 arch/m68k/bvme6000/config.c static u64 bvme6000_read_clk(struct clocksource *cs); cs 234 arch/m68k/bvme6000/config.c static u64 bvme6000_read_clk(struct clocksource *cs) cs 38 arch/m68k/coldfire/dma_timer.c static u64 cf_dt_get_cycles(struct clocksource *cs) cs 122 arch/m68k/coldfire/pit.c static u64 pit_read_clk(struct clocksource *cs) cs 101 arch/m68k/coldfire/sltimers.c static u64 mcfslt_read_clk(struct clocksource *cs) cs 93 arch/m68k/coldfire/timers.c static u64 mcftmr_read_clk(struct clocksource *cs) cs 23 arch/m68k/hp300/time.c static u64 hp300_read_clk(struct clocksource *cs); cs 75 arch/m68k/hp300/time.c static u64 hp300_read_clk(struct clocksource *cs) cs 43 arch/m68k/include/asm/string.h static inline int strcmp(const char *cs, const char *ct) cs 56 arch/m68k/include/asm/string.h : "+a" (cs), "+a" (ct), "=d" (res)); cs 21 arch/m68k/lib/memset.c char *cs = s; cs 22 arch/m68k/lib/memset.c *cs++ = c; cs 23 arch/m68k/lib/memset.c s = cs; cs 69 arch/m68k/lib/memset.c char *cs = s; cs 70 arch/m68k/lib/memset.c *cs = c; cs 587 arch/m68k/mac/via.c static u64 mac_read_clk(struct clocksource *cs); cs 627 arch/m68k/mac/via.c static u64 mac_read_clk(struct clocksource *cs) cs 96 arch/m68k/mvme147/config.c static u64 mvme147_read_clk(struct clocksource *cs); cs 147 arch/m68k/mvme147/config.c static u64 mvme147_read_clk(struct clocksource *cs) cs 347 arch/m68k/mvme16x/config.c static u64 mvme16x_read_clk(struct clocksource *cs); cs 414 arch/m68k/mvme16x/config.c static u64 mvme16x_read_clk(struct clocksource *cs) cs 193 
arch/microblaze/kernel/timer.c static u64 xilinx_read(struct clocksource *cs) cs 35 arch/mips/alchemy/common/time.c static u64 au1x_counter1_read(struct clocksource *cs) cs 689 arch/mips/alchemy/devboards/db1200.c static void db1200_spi_cs_en(struct au1550_spi_info *spi, int cs, int pol) cs 691 arch/mips/alchemy/devboards/db1200.c if (cs) cs 260 arch/mips/alchemy/devboards/db1550.c static void db1550_spi_cs_en(struct au1550_spi_info *spi, int cs, int pol) cs 262 arch/mips/alchemy/devboards/db1550.c if (cs) cs 24 arch/mips/bcm63xx/cs.c static int is_valid_cs(unsigned int cs) cs 26 arch/mips/bcm63xx/cs.c if (cs > 6) cs 35 arch/mips/bcm63xx/cs.c int bcm63xx_set_cs_base(unsigned int cs, u32 base, unsigned int size) cs 40 arch/mips/bcm63xx/cs.c if (!is_valid_cs(cs)) cs 55 arch/mips/bcm63xx/cs.c bcm_mpi_writel(val, MPI_CSBASE_REG(cs)); cs 66 arch/mips/bcm63xx/cs.c int bcm63xx_set_cs_timing(unsigned int cs, unsigned int wait, cs 72 arch/mips/bcm63xx/cs.c if (!is_valid_cs(cs)) cs 76 arch/mips/bcm63xx/cs.c val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); cs 83 arch/mips/bcm63xx/cs.c bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); cs 94 arch/mips/bcm63xx/cs.c int bcm63xx_set_cs_param(unsigned int cs, u32 params) cs 99 arch/mips/bcm63xx/cs.c if (!is_valid_cs(cs)) cs 103 arch/mips/bcm63xx/cs.c if (cs == MPI_CS_PCMCIA_COMMON || cs 104 arch/mips/bcm63xx/cs.c cs == MPI_CS_PCMCIA_ATTR || cs 105 arch/mips/bcm63xx/cs.c cs == MPI_CS_PCMCIA_IO) cs 109 arch/mips/bcm63xx/cs.c val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); cs 115 arch/mips/bcm63xx/cs.c bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); cs 126 arch/mips/bcm63xx/cs.c int bcm63xx_set_cs_status(unsigned int cs, int enable) cs 131 arch/mips/bcm63xx/cs.c if (!is_valid_cs(cs)) cs 135 arch/mips/bcm63xx/cs.c val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); cs 140 arch/mips/bcm63xx/cs.c bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); cs 69 arch/mips/bcm63xx/dev-pcmcia.c static int __init config_pcmcia_cs(unsigned int cs, cs 74 arch/mips/bcm63xx/dev-pcmcia.c ret = bcm63xx_set_cs_status(cs, 0); cs 76 arch/mips/bcm63xx/dev-pcmcia.c ret = bcm63xx_set_cs_base(cs, base, size); cs 78 arch/mips/bcm63xx/dev-pcmcia.c ret = bcm63xx_set_cs_status(cs, 1); cs 83 arch/mips/bcm63xx/dev-pcmcia.c unsigned int cs; cs 88 arch/mips/bcm63xx/dev-pcmcia.c .cs = MPI_CS_PCMCIA_COMMON, cs 93 arch/mips/bcm63xx/dev-pcmcia.c .cs = MPI_CS_PCMCIA_ATTR, cs 98 arch/mips/bcm63xx/dev-pcmcia.c .cs = MPI_CS_PCMCIA_IO, cs 132 arch/mips/bcm63xx/dev-pcmcia.c ret = config_pcmcia_cs(pcmcia_cs[i].cs, cs 102 arch/mips/cavium-octeon/csrc-octeon.c static u64 octeon_cvmcount_read(struct clocksource *cs) cs 73 arch/mips/cavium-octeon/flash_setup.c u32 cs; cs 77 arch/mips/cavium-octeon/flash_setup.c r = of_property_read_u32(np, "reg", &cs); cs 85 arch/mips/cavium-octeon/flash_setup.c region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs)); cs 928 arch/mips/cavium-octeon/octeon-platform.c int cs, bootbus; cs 950 arch/mips/cavium-octeon/octeon-platform.c for (cs = 0; cs < 8; cs++) { cs 951 arch/mips/cavium-octeon/octeon-platform.c mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs)); cs 960 arch/mips/cavium-octeon/octeon-platform.c if (cs >= 7) { cs 974 arch/mips/cavium-octeon/octeon-platform.c cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs + 1)); cs 991 arch/mips/cavium-octeon/octeon-platform.c new_reg[0] = cpu_to_be32(cs); cs 994 arch/mips/cavium-octeon/octeon-platform.c new_reg[3] = cpu_to_be32(cs + 1); cs 1007 arch/mips/cavium-octeon/octeon-platform.c ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32); cs 1008 
arch/mips/cavium-octeon/octeon-platform.c ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff); cs 1009 arch/mips/cavium-octeon/octeon-platform.c ranges[(cs * 5) + 4] = cpu_to_be32(region_size); cs 1011 arch/mips/cavium-octeon/octeon-platform.c cs++; cs 1012 arch/mips/cavium-octeon/octeon-platform.c ranges[(cs * 5) + 2] = cpu_to_be32(region1_base >> 32); cs 1013 arch/mips/cavium-octeon/octeon-platform.c ranges[(cs * 5) + 3] = cpu_to_be32(region1_base & 0xffffffff); cs 1014 arch/mips/cavium-octeon/octeon-platform.c ranges[(cs * 5) + 4] = cpu_to_be32(region1_size); cs 1030 arch/mips/cavium-octeon/octeon-platform.c int cs, bootbus; cs 1040 arch/mips/cavium-octeon/octeon-platform.c for (cs = 0; cs < 8; cs++) { cs 1041 arch/mips/cavium-octeon/octeon-platform.c mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs)); cs 1049 arch/mips/cavium-octeon/octeon-platform.c if (cs > 7) cs 1052 arch/mips/cavium-octeon/octeon-platform.c new_reg[0] = cpu_to_be32(cs); cs 1055 arch/mips/cavium-octeon/octeon-platform.c new_reg[3] = cpu_to_be32(cs); cs 1068 arch/mips/cavium-octeon/octeon-platform.c ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32); cs 1069 arch/mips/cavium-octeon/octeon-platform.c ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff); cs 1070 arch/mips/cavium-octeon/octeon-platform.c ranges[(cs * 5) + 4] = cpu_to_be32(region_size); cs 242 arch/mips/include/asm/asmmacro.h .macro _cfcmsa rd, cs cs 372 arch/mips/include/asm/asmmacro.h .macro _cfcmsa rd, cs cs 12 arch/mips/include/asm/mach-au1x00/au1550_spi.h void (*activate_cs)(struct au1550_spi_info *spi, int cs, int polarity); cs 13 arch/mips/include/asm/mach-au1x00/au1550_spi.h void (*deactivate_cs)(struct au1550_spi_info *spi, int cs, int polarity); cs 5 arch/mips/include/asm/mach-bcm63xx/bcm63xx_cs.h int bcm63xx_set_cs_base(unsigned int cs, u32 base, unsigned int size); cs 6 arch/mips/include/asm/mach-bcm63xx/bcm63xx_cs.h int bcm63xx_set_cs_timing(unsigned int cs, unsigned int wait, cs 8 arch/mips/include/asm/mach-bcm63xx/bcm63xx_cs.h int bcm63xx_set_cs_param(unsigned int cs, u32 flags); cs 9 arch/mips/include/asm/mach-bcm63xx/bcm63xx_cs.h int bcm63xx_set_cs_status(unsigned int cs, int enable); cs 165 arch/mips/include/asm/msa.h _ASM_MACRO_2R(cfcmsa, rd, cs, cs 177 arch/mips/include/asm/msa.h #define __BUILD_MSA_CTL_REG(name, cs) \ cs 184 arch/mips/include/asm/msa.h " cfcmsa %0, $" #cs "\n" \ cs 195 arch/mips/include/asm/msa.h " ctcmsa $" #cs ", %0\n" \ cs 37 arch/mips/include/asm/netlogic/xlr/flash.h #define FLASH_CSBASE_ADDR(cs) (cs) cs 38 arch/mips/include/asm/netlogic/xlr/flash.h #define FLASH_CSADDR_MASK(cs) (0x10 + (cs)) cs 39 arch/mips/include/asm/netlogic/xlr/flash.h #define FLASH_CSDEV_PARM(cs) (0x20 + (cs)) cs 40 arch/mips/include/asm/netlogic/xlr/flash.h #define FLASH_CSTIME_PARMA(cs) (0x30 + (cs)) cs 41 arch/mips/include/asm/netlogic/xlr/flash.h #define FLASH_CSTIME_PARMB(cs) (0x40 + (cs)) cs 48 arch/mips/include/asm/netlogic/xlr/flash.h #define FLASH_NAND_CLE(cs) (0x90 + (cs)) cs 49 arch/mips/include/asm/netlogic/xlr/flash.h #define FLASH_NAND_ALE(cs) (0xa0 + (cs)) cs 488 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_IO_EXT_CS_BASE(cs) (A_IO_EXT_CFG_BASE+IO_EXT_REGISTER_SPACING*(cs)) cs 489 arch/mips/include/asm/sibyte/sb1250_regs.h #define R_IO_EXT_REG(reg, cs) ((cs)*IO_EXT_REGISTER_SPACING + (reg)) cs 91 arch/mips/include/asm/txx9/tx4939.h struct tx4939_le_reg cs; cs 26 arch/mips/kernel/cevt-txx9.c struct clocksource cs; cs 30 arch/mips/kernel/cevt-txx9.c static u64 txx9_cs_read(struct 
clocksource *cs) cs 33 arch/mips/kernel/cevt-txx9.c container_of(cs, struct txx9_clocksource, cs); cs 41 arch/mips/kernel/cevt-txx9.c .cs = { cs 60 arch/mips/kernel/cevt-txx9.c clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); cs 19 arch/mips/kernel/csrc-bcm1480.c static u64 bcm1480_hpt_read(struct clocksource *cs) cs 39 arch/mips/kernel/csrc-bcm1480.c struct clocksource *cs = &bcm1480_clocksource; cs 45 arch/mips/kernel/csrc-bcm1480.c clocksource_register_hz(cs, zbbus); cs 16 arch/mips/kernel/csrc-ioasic.c static u64 dec_ioasic_hpt_read(struct clocksource *cs) cs 14 arch/mips/kernel/csrc-r4k.c static u64 c0_hpt_read(struct clocksource *cs) cs 35 arch/mips/kernel/csrc-sb1250.c static u64 sb1250_hpt_read(struct clocksource *cs) cs 55 arch/mips/kernel/csrc-sb1250.c struct clocksource *cs = &bcm1250_clocksource; cs 68 arch/mips/kernel/csrc-sb1250.c clocksource_register_hz(cs, V_SCD_TIMER_FREQ); cs 96 arch/mips/lasat/at93c.c at93c_reg_write((at93c_reg_read() | at93c->cs) & cs 103 arch/mips/lasat/at93c.c at93c_reg_write(at93c_reg_read() & ~at93c->cs); cs 14 arch/mips/lasat/at93c.h u32 cs; cs 72 arch/mips/lasat/prom.c .cs = AT93C_CS_M_100, cs 79 arch/mips/lasat/prom.c .cs = AT93C_CS_M_200, cs 62 arch/mips/loongson32/common/time.c static u64 ls1x_clocksource_read(struct clocksource *cs) cs 145 arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c static u64 mfgpt_read(struct clocksource *cs) cs 254 arch/mips/loongson64/loongson-3/hpet.c static u64 hpet_read_counter(struct clocksource *cs) cs 259 arch/mips/loongson64/loongson-3/hpet.c static void hpet_suspend(struct clocksource *cs) cs 263 arch/mips/loongson64/loongson-3/hpet.c static void hpet_resume(struct clocksource *cs) cs 62 arch/mips/netlogic/common/time.c static u64 nlm_get_pic_timer(struct clocksource *cs) cs 69 arch/mips/netlogic/common/time.c static u64 nlm_get_pic_timer32(struct clocksource *cs) cs 88 arch/mips/netlogic/xlr/platform-flash.c int cs; cs 99 arch/mips/netlogic/xlr/platform-flash.c FLASH_NAND_CLE(nand_priv.cs), cmd); cs 102 arch/mips/netlogic/xlr/platform-flash.c FLASH_NAND_ALE(nand_priv.cs), cmd); cs 143 arch/mips/netlogic/xlr/platform-flash.c uint64_t flash_map_base, int cs, struct resource *res) cs 147 arch/mips/netlogic/xlr/platform-flash.c base = nlm_read_reg(flash_mmio, FLASH_CSBASE_ADDR(cs)); cs 148 arch/mips/netlogic/xlr/platform-flash.c mask = nlm_read_reg(flash_mmio, FLASH_CSADDR_MASK(cs)); cs 158 arch/mips/netlogic/xlr/platform-flash.c int cs, boot_nand, boot_nor; cs 187 arch/mips/netlogic/xlr/platform-flash.c cs = 0; cs 190 arch/mips/netlogic/xlr/platform-flash.c nand_priv.cs = cs; cs 192 arch/mips/netlogic/xlr/platform-flash.c setup_flash_resource(flash_mmio, flash_map_base, cs, cs 196 arch/mips/netlogic/xlr/platform-flash.c nlm_write_reg(flash_mmio, FLASH_CSDEV_PARM(cs), cs 198 arch/mips/netlogic/xlr/platform-flash.c nlm_write_reg(flash_mmio, FLASH_CSTIME_PARMA(cs), cs 200 arch/mips/netlogic/xlr/platform-flash.c nlm_write_reg(flash_mmio, FLASH_CSTIME_PARMB(cs), cs 203 arch/mips/netlogic/xlr/platform-flash.c pr_info("ChipSelect %d: NAND Flash %pR\n", cs, xlr_nand_res); cs 208 arch/mips/netlogic/xlr/platform-flash.c setup_flash_resource(flash_mmio, flash_map_base, cs, cs 210 arch/mips/netlogic/xlr/platform-flash.c pr_info("ChipSelect %d: NOR Flash %pR\n", cs, xlr_nor_res); cs 120 arch/mips/sgi-ip27/ip27-timer.c static u64 hub_rt_read(struct clocksource *cs) cs 140 arch/mips/sgi-ip27/ip27-timer.c struct clocksource *cs = &hub_rt_clocksource; cs 142 arch/mips/sgi-ip27/ip27-timer.c 
clocksource_register_hz(cs, CYCLES_PER_SEC); cs 211 arch/mips/txx9/generic/irq_tx4939.c u32 csr = __raw_readl(&tx4939_ircptr->cs.r); cs 45 arch/mips/txx9/generic/mem_tx4927.c unsigned int cs = 0; cs 60 arch/mips/txx9/generic/mem_tx4927.c cs = 256 << sdccr_cs; cs 64 arch/mips/txx9/generic/mem_tx4927.c return rs * cs * mw * bs; cs 48 arch/nios2/kernel/time.c struct clocksource cs; cs 58 arch/nios2/kernel/time.c to_nios2_clksource(struct clocksource *cs) cs 60 arch/nios2/kernel/time.c return container_of(cs, struct nios2_clocksource, cs); cs 84 arch/nios2/kernel/time.c static u64 nios2_timer_read(struct clocksource *cs) cs 86 arch/nios2/kernel/time.c struct nios2_clocksource *nios2_cs = to_nios2_clksource(cs); cs 99 arch/nios2/kernel/time.c .cs = { cs 112 arch/nios2/kernel/time.c return nios2_timer_read(&nios2_cs.cs); cs 295 arch/nios2/kernel/time.c ret = clocksource_register_hz(&nios2_cs.cs, freq); cs 136 arch/openrisc/kernel/time.c static u64 openrisc_timer_read(struct clocksource *cs) cs 250 arch/parisc/include/asm/pdcpat.h u64 cs:1; /* clear status: cleared since the last call? */ cs 143 arch/parisc/kernel/time.c static u64 notrace read_cr16(struct clocksource *cs) cs 172 arch/powerpc/boot/4xx.c u32 val, cs; cs 192 arch/powerpc/boot/4xx.c cs = 0; cs 195 arch/powerpc/boot/4xx.c cs++; cs 198 arch/powerpc/boot/4xx.c return cs; cs 204 arch/powerpc/boot/4xx.c u32 cs, col, row, bank, dpath; cs 216 arch/powerpc/boot/4xx.c cs = ibm4xx_denali_get_cs(); cs 217 arch/powerpc/boot/4xx.c if (!cs) cs 219 arch/powerpc/boot/4xx.c if (cs > max_cs) cs 251 arch/powerpc/boot/4xx.c memsize = cs * (1 << (col+row)) * bank * dpath; cs 81 arch/powerpc/boot/cuboot-pq2.c int cs = cs_ranges_buf[i].csnum; cs 82 arch/powerpc/boot/cuboot-pq2.c if (cs >= ctrl_size / 8) cs 88 arch/powerpc/boot/cuboot-pq2.c base = in_be32(&ctrl_addr[cs * 2]); cs 95 arch/powerpc/boot/cuboot-pq2.c option = in_be32(&ctrl_addr[cs * 2 + 1]) & 0x7fff; cs 101 arch/powerpc/boot/cuboot-pq2.c out_be32(&ctrl_addr[cs * 2], 0); cs 102 arch/powerpc/boot/cuboot-pq2.c out_be32(&ctrl_addr[cs * 2 + 1], cs 104 arch/powerpc/boot/cuboot-pq2.c out_be32(&ctrl_addr[cs * 2], base | cs_ranges_buf[i].addr); cs 86 arch/powerpc/include/asm/icswx.h u8 cs; cs 59 arch/powerpc/include/asm/mpc5121.h int mpc512x_cs_config(unsigned int cs, u32 val); cs 299 arch/powerpc/include/asm/mpc52xx.h unsigned int cs; cs 422 arch/powerpc/include/asm/ps3av.h struct ps3av_info_cs cs; cs 229 arch/powerpc/kernel/prom_init.c static int __init prom_strcmp(const char *cs, const char *ct) cs 234 arch/powerpc/kernel/prom_init.c c1 = *cs++; cs 253 arch/powerpc/kernel/prom_init.c static int __init prom_strncmp(const char *cs, const char *ct, size_t count) cs 258 arch/powerpc/kernel/prom_init.c c1 = *cs++; cs 278 arch/powerpc/kernel/prom_init.c static int __init prom_memcmp(const void *cs, const void *ct, size_t count) cs 283 arch/powerpc/kernel/prom_init.c for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) cs 857 arch/powerpc/kernel/time.c static notrace u64 rtc_read(struct clocksource *cs) cs 862 arch/powerpc/kernel/time.c static notrace u64 timebase_read(struct clocksource *cs) cs 163 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c u32 cs = 0; cs 218 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c cs = lpbfifo.cs_ranges[i].csnum; cs 293 arch/powerpc/platforms/512x/mpc512x_lpbfifo.c bits = MPC512X_SCLPC_CS(cs); cs 487 arch/powerpc/platforms/512x/mpc512x_shared.c int mpc512x_cs_config(unsigned int cs, u32 val) cs 492 arch/powerpc/platforms/512x/mpc512x_shared.c if (cs > 7) cs 503 
arch/powerpc/platforms/512x/mpc512x_shared.c out_be32(&lpc->cs_cfg[cs], val); cs 166 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c bit_fields = req->cs << 24 | 0x000008; cs 193 arch/powerpc/platforms/cell/spider-pic.c unsigned int cs, virq; cs 195 arch/powerpc/platforms/cell/spider-pic.c cs = in_be32(pic->regs + TIR_CS) >> 24; cs 196 arch/powerpc/platforms/cell/spider-pic.c if (cs == SPIDER_IRQ_INVALID) cs 199 arch/powerpc/platforms/cell/spider-pic.c virq = irq_linear_revmap(pic->host, cs); cs 8 arch/s390/boot/string.c int strncmp(const char *cs, const char *ct, size_t count) cs 13 arch/s390/boot/string.c c1 = *cs++; cs 66 arch/s390/include/asm/cpu_mf.h unsigned int cs:1; /* 30: basic-sampling activation control */ cs 89 arch/s390/include/asm/cpu_mf.h unsigned int cs:1; /* 62: basic-sampling activation control */ cs 989 arch/s390/kernel/perf_cpum_sf.c cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed, cs 1010 arch/s390/kernel/perf_cpum_sf.c inactive.cs = 0; cs 1848 arch/s390/kernel/perf_cpum_sf.c cpuhw->lsctl.cs = 1; cs 1865 arch/s390/kernel/perf_cpum_sf.c cpuhw->lsctl.cs = 0; cs 150 arch/s390/kernel/perf_event.c si.as, si.es, si.cs, si.bsdes, si.tear, si.dear); cs 242 arch/s390/kernel/time.c static u64 read_tod_clock(struct clocksource *cs) cs 122 arch/s390/kvm/gaccess.c unsigned long cs : 1; /* Common-Segment Bit */ cs 138 arch/s390/kvm/gaccess.c unsigned long cs : 1; /* Common-Segment Bit */ cs 152 arch/s390/kvm/gaccess.c unsigned long cs : 1; /* Common-Segment Bit */ cs 738 arch/s390/kvm/gaccess.c if (ste.cs && asce.p) cs 1130 arch/s390/kvm/gaccess.c if (ste.cs && asce.p) cs 67 arch/sh/boards/mach-r2d/setup.c static void r2d_chip_select(struct sh_spi_info *spi, int cs, int state) cs 69 arch/sh/boards/mach-r2d/setup.c BUG_ON(cs != 0); /* Single Epson RTC-9701JE attached on CS0 */ cs 11 arch/sh/include/asm/spi.h void (*chip_select)(struct sh_spi_info *spi, int cs, int state); cs 152 arch/sparc/kernel/time_32.c static u64 timer_cs_read(struct clocksource *cs) cs 794 arch/sparc/kernel/time_64.c static u64 clocksource_tick_read(struct clocksource *cs) cs 138 arch/um/kernel/time.c static u64 timer_read(struct clocksource *cs) cs 62 arch/unicore32/kernel/time.c static u64 puv3_read_oscr(struct clocksource *cs) cs 327 arch/x86/boot/boot.h int strncmp(const char *cs, const char *ct, size_t count); cs 64 arch/x86/boot/string.c int strncmp(const char *cs, const char *ct, size_t count) cs 69 arch/x86/boot/string.c c1 = *cs++; cs 23 arch/x86/boot/string.h extern int strncmp(const char *cs, const char *ct, size_t count); cs 416 arch/x86/entry/common.c return regs->cs == __USER32_CS && regs->ss == __USER_DS && cs 431 arch/x86/entry/common.c regs->cs == __USER_CS && regs->ss == __USER_DS && cs 81 arch/x86/entry/vsyscall/vsyscall_64.c message, regs->ip, regs->cs, cs 2440 arch/x86/events/core.c cs_base = get_segment_base(regs->cs); cs 2550 arch/x86/events/core.c return 0x10 * regs->cs; cs 2552 arch/x86/events/core.c if (user_mode(regs) && regs->cs != __USER_CS) cs 2553 arch/x86/events/core.c return get_segment_base(regs->cs); cs 2556 arch/x86/events/core.c regs->cs != __USER32_CS) cs 2557 arch/x86/events/core.c return get_segment_base(regs->cs); cs 891 arch/x86/events/perf_event.h regs->cs = kernel_ip(ip) ? 
__KERNEL_CS : __USER_CS; cs 144 arch/x86/ia32/ia32_aout.c regs->cs = __USER32_CS; cs 241 arch/x86/ia32/ia32_aout.c (regs)->cs = __USER32_CS; cs 92 arch/x86/ia32/ia32_signal.c COPY_SEG_CPL3(cs); cs 203 arch/x86/ia32/ia32_signal.c put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); cs 324 arch/x86/ia32/ia32_signal.c regs->cs = __USER32_CS; cs 403 arch/x86/ia32/ia32_signal.c regs->cs = __USER32_CS; cs 135 arch/x86/include/asm/elf.h pr_reg[13] = regs->cs; \ cs 226 arch/x86/include/asm/elf.h (pr_reg)[17] = (regs)->cs; \ cs 94 arch/x86/include/asm/kexec.h asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); cs 116 arch/x86/include/asm/kexec.h asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); cs 856 arch/x86/include/asm/paravirt.h #define PARA_INDIRECT(addr) *%cs:addr cs 316 arch/x86/include/asm/perf_event.h (regs)->cs = __KERNEL_CS; \ cs 289 arch/x86/include/asm/processor.h unsigned short cs, __csh; cs 46 arch/x86/include/asm/ptrace.h unsigned short cs; cs 84 arch/x86/include/asm/ptrace.h unsigned long cs; cs 129 arch/x86/include/asm/ptrace.h return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL; cs 131 arch/x86/include/asm/ptrace.h return !!(regs->cs & 3); cs 152 arch/x86/include/asm/ptrace.h return regs->cs == __USER_CS; cs 155 arch/x86/include/asm/ptrace.h return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; cs 220 arch/x86/include/asm/ptrace.h if (offset == offsetof(struct pt_regs, cs) || cs 22 arch/x86/include/asm/string_32.h extern int strcmp(const char *cs, const char *ct); cs 25 arch/x86/include/asm/string_32.h extern int strncmp(const char *cs, const char *ct, size_t count); cs 196 arch/x86/include/asm/string_32.h extern void *memchr(const void *cs, int c, size_t count); cs 218 arch/x86/include/asm/string_32.h extern char *strstr(const char *cs, const char *ct); cs 61 arch/x86/include/asm/string_64.h int memcmp(const void *cs, const void *ct, size_t count); cs 65 arch/x86/include/asm/string_64.h int strcmp(const char *cs, const char *ct); cs 163 arch/x86/include/asm/svm.h struct vmcb_seg cs; cs 41 arch/x86/include/asm/user32.h unsigned short cs, __cs; cs 92 arch/x86/include/asm/user_32.h unsigned long cs; cs 87 arch/x86/include/asm/user_64.h unsigned long cs; cs 144 arch/x86/include/asm/xen/interface.h uint16_t cs; /* code selector */ cs 308 arch/x86/include/asm/xen/interface.h uint16_t cs; cs 60 arch/x86/include/asm/xen/interface_32.h uint16_t cs; cs 81 arch/x86/include/asm/xen/interface_32.h unsigned long cs; cs 87 arch/x86/include/asm/xen/interface_32.h ((struct xen_callback){ .cs = (__cs), .eip = (unsigned long)(__eip) }) cs 84 arch/x86/include/asm/xen/interface_64.h uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; cs 118 arch/x86/include/asm/xen/interface_64.h uint16_t cs, _pad0[1]; cs 151 arch/x86/include/uapi/asm/kvm.h struct kvm_segment cs, ds, es, fs, gs, ss; cs 26 arch/x86/include/uapi/asm/mce.h __u8 cs; /* Code segment */ cs 72 arch/x86/include/uapi/asm/ptrace.h unsigned long cs; cs 218 arch/x86/include/uapi/asm/sigcontext.h __u16 cs, __csh; cs 257 arch/x86/include/uapi/asm/sigcontext.h __u16 cs; cs 316 arch/x86/include/uapi/asm/sigcontext.h __u16 cs, __csh; cs 344 arch/x86/include/uapi/asm/sigcontext.h __u16 cs; cs 80 arch/x86/include/uapi/asm/vm86.h unsigned short cs, __csh; cs 40 arch/x86/kernel/asm-offsets_32.c OFFSET(PT_CS, pt_regs, cs); cs 254 arch/x86/kernel/cpu/mce/core.c m->cs, m->ip); cs 256 arch/x86/kernel/cpu/mce/core.c if (m->cs == __KERNEL_CS) cs 446 arch/x86/kernel/cpu/mce/core.c m->cs = regs->cs; 
cs 454 arch/x86/kernel/cpu/mce/core.c m->cs |= 3; cs 1353 arch/x86/kernel/cpu/mce/core.c if ((m.cs & 3) == 3) { cs 1629 arch/x86/kernel/cpu/mce/core.c m->cs = regs->cs; cs 146 arch/x86/kernel/cpu/mce/inject.c regs.cs = m->cs; cs 214 arch/x86/kernel/cpu/mce/severity.c if ((m->cs & 3) == 3) cs 64 arch/x86/kernel/doublefault.c .cs = __KERNEL_CS, cs 124 arch/x86/kernel/dumpstack.c printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); cs 250 arch/x86/kernel/fpu/regset.c env->fcs = task_pt_regs(tsk)->cs; cs 280 arch/x86/kernel/hpet.c static void hpet_resume_counter(struct clocksource *cs) cs 683 arch/x86/kernel/hpet.c static u64 read_hpet(struct clocksource *cs) cs 741 arch/x86/kernel/hpet.c static u64 read_hpet(struct clocksource *cs) cs 59 arch/x86/kernel/kgdb.c { "cs", 4, offsetof(struct pt_regs, cs) }, cs 82 arch/x86/kernel/kgdb.c { "cs", 4, offsetof(struct pt_regs, cs) }, cs 782 arch/x86/kernel/kprobes/core.c regs->cs = __KERNEL_CS; cs 784 arch/x86/kernel/kprobes/core.c regs->cs |= get_kernel_rpl(); cs 160 arch/x86/kernel/kprobes/opt.c regs->cs = __KERNEL_CS; cs 162 arch/x86/kernel/kprobes/opt.c regs->cs |= get_kernel_rpl(); cs 95 arch/x86/kernel/kvmclock.c static u64 kvm_clock_get_cycles(struct clocksource *cs) cs 494 arch/x86/kernel/nmi.c struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks); cs 495 arch/x86/kernel/nmi.c unsigned long top = CEA_ESTACK_TOP(cs, DB); cs 496 arch/x86/kernel/nmi.c unsigned long bot = CEA_ESTACK_BOT(cs, DB1); cs 31 arch/x86/kernel/perf_regs.c PT_REGS_OFFSET(PERF_REG_X86_CS, cs), cs 168 arch/x86/kernel/perf_regs.c regs_user_copy->cs = user_regs->cs; cs 191 arch/x86/kernel/process_32.c regs->cs = __USER_CS; cs 119 arch/x86/kernel/process_64.c printk(KERN_DEFAULT "CS: %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds, cs 469 arch/x86/kernel/process_64.c regs->cs = _cs; cs 92 arch/x86/kernel/ptrace.c REG_OFFSET_NAME(cs), cs 200 arch/x86/kernel/ptrace.c case offsetof(struct user_regs_struct, cs): cs 264 arch/x86/kernel/ptrace.c case offsetof(struct user_regs_struct, cs): cs 305 arch/x86/kernel/ptrace.c case offsetof(struct user_regs_struct,cs): cs 308 arch/x86/kernel/ptrace.c task_pt_regs(task)->cs = value; cs 358 arch/x86/kernel/ptrace.c case offsetof(struct user_regs_struct, cs): cs 400 arch/x86/kernel/ptrace.c case offsetof(struct user_regs_struct, cs): cs 871 arch/x86/kernel/ptrace.c SEG32(cs); cs 949 arch/x86/kernel/ptrace.c R32(cs, cs); cs 1129 arch/x86/kernel/ptrace.c addr < offsetof(struct user_regs_struct, cs)) cs 1150 arch/x86/kernel/ptrace.c addr < offsetof(struct user_regs_struct, cs)) cs 132 arch/x86/kernel/signal.c COPY_SEG_CPL3(cs); cs 196 arch/x86/kernel/signal.c put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); cs 202 arch/x86/kernel/signal.c put_user_ex(regs->cs, &sc->cs); cs 370 arch/x86/kernel/signal.c regs->cs = __USER_CS; cs 436 arch/x86/kernel/signal.c regs->cs = __USER_CS; cs 529 arch/x86/kernel/signal.c regs->cs = __USER_CS; cs 597 arch/x86/kernel/signal.c regs->cs = __USER_CS; cs 17 arch/x86/kernel/step.c seg = regs->cs; cs 119 arch/x86/kernel/time.c void clocksource_arch_init(struct clocksource *cs) cs 121 arch/x86/kernel/time.c if (cs->archdata.vclock_mode == VCLOCK_NONE) cs 124 arch/x86/kernel/time.c if (cs->archdata.vclock_mode > VCLOCK_MAX) { cs 126 arch/x86/kernel/time.c cs->name, cs->archdata.vclock_mode); cs 127 arch/x86/kernel/time.c cs->archdata.vclock_mode = VCLOCK_NONE; cs 130 arch/x86/kernel/time.c if (cs->mask != CLOCKSOURCE_MASK(64)) { cs 132 arch/x86/kernel/time.c cs->name, 
cs->mask); cs 133 arch/x86/kernel/time.c cs->archdata.vclock_mode = VCLOCK_NONE; cs 339 arch/x86/kernel/traps.c regs->cs == __KERNEL_CS && cs 1064 arch/x86/kernel/tsc.c static void tsc_resume(struct clocksource *cs) cs 1085 arch/x86/kernel/tsc.c static u64 read_tsc(struct clocksource *cs) cs 1090 arch/x86/kernel/tsc.c static void tsc_cs_mark_unstable(struct clocksource *cs) cs 1102 arch/x86/kernel/tsc.c static void tsc_cs_tick_stable(struct clocksource *cs) cs 1230 arch/x86/kernel/tsc.c return (struct system_counterval_t) {.cs = art_related_clocksource, cs 1268 arch/x86/kernel/tsc.c return (struct system_counterval_t) { .cs = art_related_clocksource, cs 133 arch/x86/kernel/vm86_32.c put_user_ex(regs->pt.cs, &user->regs.cs); cs 297 arch/x86/kernel/vm86_32.c get_user_ex(seg, &user_vm86->regs.cs); cs 298 arch/x86/kernel/vm86_32.c vm86regs.pt.cs = seg; cs 544 arch/x86/kernel/vm86_32.c if (regs->pt.cs == BIOSSEG) cs 556 arch/x86/kernel/vm86_32.c pushw(ssp, sp, regs->pt.cs, cannot_handle); cs 558 arch/x86/kernel/vm86_32.c regs->pt.cs = segoffs >> 16; cs 605 arch/x86/kernel/vm86_32.c csp = (unsigned char __user *) (regs->pt.cs << 4); cs 695 arch/x86/kernel/vm86_32.c regs->pt.cs = newcs; cs 2083 arch/x86/kvm/emulate.c u16 cs, eip; cs 2108 arch/x86/kvm/emulate.c rc = linear_read_system(ctxt, cs_addr, &cs, 2); cs 2116 arch/x86/kvm/emulate.c rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); cs 2156 arch/x86/kvm/emulate.c unsigned long cs = 0; cs 2176 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); cs 2186 arch/x86/kvm/emulate.c rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); cs 2300 arch/x86/kvm/emulate.c unsigned long eip, cs; cs 2307 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); cs 2311 arch/x86/kvm/emulate.c if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) cs 2313 arch/x86/kvm/emulate.c rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, cs 2696 arch/x86/kvm/emulate.c struct desc_struct *cs, struct desc_struct *ss) cs 2698 arch/x86/kvm/emulate.c cs->l = 0; /* will be adjusted later */ cs 2699 arch/x86/kvm/emulate.c set_desc_base(cs, 0); /* flat segment */ cs 2700 arch/x86/kvm/emulate.c cs->g = 1; /* 4kb granularity */ cs 2701 arch/x86/kvm/emulate.c set_desc_limit(cs, 0xfffff); /* 4GB limit */ cs 2702 arch/x86/kvm/emulate.c cs->type = 0x0b; /* Read, Execute, Accessed */ cs 2703 arch/x86/kvm/emulate.c cs->s = 1; cs 2704 arch/x86/kvm/emulate.c cs->dpl = 0; /* will be adjusted later */ cs 2705 arch/x86/kvm/emulate.c cs->p = 1; cs 2706 arch/x86/kvm/emulate.c cs->d = 1; cs 2707 arch/x86/kvm/emulate.c cs->avl = 0; cs 2788 arch/x86/kvm/emulate.c struct desc_struct cs, ss; cs 2802 arch/x86/kvm/emulate.c setup_syscalls_segments(ctxt, &cs, &ss); cs 2813 arch/x86/kvm/emulate.c cs.d = 0; cs 2814 arch/x86/kvm/emulate.c cs.l = 1; cs 2816 arch/x86/kvm/emulate.c ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); cs 2848 arch/x86/kvm/emulate.c struct desc_struct cs, ss; cs 2870 arch/x86/kvm/emulate.c setup_syscalls_segments(ctxt, &cs, &ss); cs 2880 arch/x86/kvm/emulate.c cs.d = 0; cs 2881 arch/x86/kvm/emulate.c cs.l = 1; cs 2884 arch/x86/kvm/emulate.c ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); cs 2900 arch/x86/kvm/emulate.c struct desc_struct cs, ss; cs 2910 arch/x86/kvm/emulate.c setup_syscalls_segments(ctxt, &cs, &ss); cs 2920 arch/x86/kvm/emulate.c cs.dpl = 3; cs 2937 arch/x86/kvm/emulate.c cs.d = 0; cs 2938 arch/x86/kvm/emulate.c cs.l = 1; cs 2947 arch/x86/kvm/emulate.c ops->set_segment(ctxt, cs_sel, &cs, 0, 
VCPU_SREG_CS); cs 3065 arch/x86/kvm/emulate.c tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); cs 3094 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); cs 3098 arch/x86/kvm/emulate.c cpl = tss->cs & 3; cs 3112 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, cs 3179 arch/x86/kvm/emulate.c tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); cs 3214 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); cs 3230 arch/x86/kvm/emulate.c cpl = tss->cs & 3; cs 3245 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, cs 830 arch/x86/kvm/svm.c svm->int3_rip = rip + svm->vmcb->save.cs.base; cs 1612 arch/x86/kvm/svm.c save->cs.selector = 0xf000; cs 1613 arch/x86/kvm/svm.c save->cs.base = 0xffff0000; cs 1615 arch/x86/kvm/svm.c save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | cs 1617 arch/x86/kvm/svm.c save->cs.limit = 0xffff; cs 2438 arch/x86/kvm/svm.c case VCPU_SREG_CS: return &save->cs; cs 2792 arch/x86/kvm/svm.c svm->vmcb->save.cs.base + svm->vmcb->save.rip; cs 2805 arch/x86/kvm/svm.c kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; cs 3382 arch/x86/kvm/svm.c nested_vmcb->save.cs = vmcb->save.cs; cs 3452 arch/x86/kvm/svm.c svm->vmcb->save.cs = hsave->save.cs; cs 3556 arch/x86/kvm/svm.c svm->vmcb->save.cs = nested_vmcb->save.cs; cs 3698 arch/x86/kvm/svm.c hsave->save.cs = vmcb->save.cs; cs 4903 arch/x86/kvm/svm.c save->cs.selector, save->cs.attrib, cs 4904 arch/x86/kvm/svm.c save->cs.limit, save->cs.base); cs 25 arch/x86/kvm/tss.h u32 cs; cs 54 arch/x86/kvm/tss.h u16 cs; cs 3253 arch/x86/kvm/vmx/vmx.c struct kvm_segment cs; cs 3256 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs 3257 arch/x86/kvm/vmx/vmx.c cs_rpl = cs.selector & SEGMENT_RPL_MASK; cs 3259 arch/x86/kvm/vmx/vmx.c if (cs.unusable) cs 3261 arch/x86/kvm/vmx/vmx.c if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) cs 3263 arch/x86/kvm/vmx/vmx.c if (!cs.s) cs 3265 arch/x86/kvm/vmx/vmx.c if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { cs 3266 arch/x86/kvm/vmx/vmx.c if (cs.dpl > cs_rpl) cs 3269 arch/x86/kvm/vmx/vmx.c if (cs.dpl != cs_rpl) cs 3272 arch/x86/kvm/vmx/vmx.c if (!cs.present) cs 3364 arch/x86/kvm/vmx/vmx.c struct kvm_segment cs, ss; cs 3366 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs 3369 arch/x86/kvm/vmx/vmx.c return ((cs.selector & SEGMENT_RPL_MASK) == cs 4580 arch/x86/kvm/vmx/vmx.c .cs = 3, /* Fake ring 3 no matter what the guest ran on */ cs 6257 arch/x86/kvm/vmx/vmx.c [cs]"i"(__KERNEL_CS) cs 7858 arch/x86/kvm/x86.c struct kvm_segment cs, ds; cs 7902 arch/x86/kvm/x86.c cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; cs 7903 arch/x86/kvm/x86.c cs.base = vcpu->arch.smbase; cs 7908 arch/x86/kvm/x86.c cs.limit = ds.limit = 0xffffffff; cs 7909 arch/x86/kvm/x86.c cs.type = ds.type = 0x3; cs 7910 arch/x86/kvm/x86.c cs.dpl = ds.dpl = 0; cs 7911 arch/x86/kvm/x86.c cs.db = ds.db = 0; cs 7912 arch/x86/kvm/x86.c cs.s = ds.s = 1; cs 7913 arch/x86/kvm/x86.c cs.l = ds.l = 0; cs 7914 arch/x86/kvm/x86.c cs.g = ds.g = 1; cs 7915 arch/x86/kvm/x86.c cs.avl = ds.avl = 0; cs 7916 arch/x86/kvm/x86.c cs.present = ds.present = 1; cs 7917 arch/x86/kvm/x86.c cs.unusable = ds.unusable = 0; cs 7918 arch/x86/kvm/x86.c cs.padding = ds.padding = 0; cs 7920 arch/x86/kvm/x86.c kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); cs 8714 arch/x86/kvm/x86.c struct kvm_segment cs; cs 8716 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); cs 8717 
arch/x86/kvm/x86.c *db = cs.db; cs 8718 arch/x86/kvm/x86.c *l = cs.l; cs 8726 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); cs 8857 arch/x86/kvm/x86.c if (sregs->efer & EFER_LMA || sregs->cs.l) cs 8927 arch/x86/kvm/x86.c kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); cs 8941 arch/x86/kvm/x86.c sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && cs 9287 arch/x86/kvm/x86.c struct kvm_segment cs; cs 9289 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); cs 9290 arch/x86/kvm/x86.c cs.selector = vector << 8; cs 9291 arch/x86/kvm/x86.c cs.base = vector << 12; cs 9292 arch/x86/kvm/x86.c kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); cs 329 arch/x86/lib/insn-eval.c return (unsigned short)(regs->cs & 0xffff); cs 353 arch/x86/lib/insn-eval.c return (unsigned short)(regs->cs & 0xffff); cs 373 arch/x86/lib/insn-eval.c return (unsigned short)(regs->cs & 0xffff); cs 95 arch/x86/lib/string_32.c int strcmp(const char *cs, const char *ct) cs 110 arch/x86/lib/string_32.c : "1" (cs), "2" (ct) cs 118 arch/x86/lib/string_32.c int strncmp(const char *cs, const char *ct, size_t count) cs 135 arch/x86/lib/string_32.c : "1" (cs), "2" (ct), "3" (count) cs 180 arch/x86/lib/string_32.c void *memchr(const void *cs, int c, size_t count) cs 192 arch/x86/lib/string_32.c : "a" (c), "0" (cs), "1" (count) cs 5 arch/x86/lib/strstr_32.c char *strstr(const char *cs, const char *ct) cs 29 arch/x86/lib/strstr_32.c : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) cs 79 arch/x86/math-emu/fpu_system.h #define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs)) cs 46 arch/x86/math-emu/get_address.c offsetof(struct pt_regs, cs), cs 59 arch/x86/math-emu/get_address.c offsetof(struct pt_regs, cs), cs 209 arch/x86/mm/extable.c if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { cs 248 arch/x86/mm/extable.c if (!xen_pv_domain() && regs->cs != __KERNEL_CS) cs 273 arch/x86/mm/extable.c (unsigned)trapnr, (unsigned long)regs->cs, regs->ip, cs 550 arch/x86/mm/fault.c if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) cs 20 arch/x86/platform/uv/uv_time.c static u64 uv_read_rtc(struct clocksource *cs); cs 286 arch/x86/platform/uv/uv_time.c static u64 uv_read_rtc(struct clocksource *cs) cs 195 arch/x86/um/signal.c GETREG(CS, cs); cs 276 arch/x86/um/signal.c PUTREG(CS, cs); cs 687 arch/x86/xen/enlighten_pv.c info->cs = gate_segment(val); cs 469 arch/x86/xen/pmu.c regs->cs = xen_regs->cs; cs 474 arch/x86/xen/pmu.c regs->cs |= 3; cs 476 arch/x86/xen/pmu.c regs->cs &= ~3; cs 479 arch/x86/xen/pmu.c regs->cs |= 3; cs 481 arch/x86/xen/pmu.c regs->cs &= ~3; cs 319 arch/x86/xen/smp_pv.c ctxt->user_regs.cs = __KERNEL_CS; cs 57 arch/x86/xen/time.c static u64 xen_clocksource_get_cycles(struct clocksource *cs) cs 37 arch/xtensa/kernel/time.c static u64 ccount_read(struct clocksource *cs) cs 37 block/partitions/sgi.c __be32 *ui, cs; cs 56 block/partitions/sgi.c cs = *ui--; cs 57 block/partitions/sgi.c csum += be32_to_cpu(cs); cs 48 drivers/ata/ahci_mvebu.c const struct mbus_dram_window *cs = dram->cs + i; cs 50 drivers/ata/ahci_mvebu.c writel((cs->mbus_attr << 8) | cs 53 drivers/ata/ahci_mvebu.c writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i)); cs 54 drivers/ata/ahci_mvebu.c writel(((cs->size - 1) & 0xffff0000), cs 88 drivers/ata/pata_octeon_cf.c static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier) cs 108 drivers/ata/pata_octeon_cf.c reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs)); cs 118 drivers/ata/pata_octeon_cf.c cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), 
reg_cfg.u64); cs 4028 drivers/ata/sata_mv.c const struct mbus_dram_window *cs = dram->cs + i; cs 4030 drivers/ata/sata_mv.c writel(((cs->size - 1) & 0xffff0000) | cs 4031 drivers/ata/sata_mv.c (cs->mbus_attr << 8) | cs 4034 drivers/ata/sata_mv.c writel(cs->base, hpriv->base + WINDOW_BASE(i)); cs 477 drivers/base/class.c struct class_attribute_string *cs; cs 479 drivers/base/class.c cs = container_of(attr, struct class_attribute_string, attr); cs 480 drivers/base/class.c return snprintf(buf, PAGE_SIZE, "%s\n", cs->str); cs 1225 drivers/block/drbd/drbd_state.c static void set_ov_position(struct drbd_device *device, enum drbd_conns cs) cs 1231 drivers/block/drbd/drbd_state.c if (cs == C_VERIFY_T) { cs 2109 drivers/block/drbd/drbd_state.c union drbd_dev_state os, cs = { cs 2123 drivers/block/drbd/drbd_state.c cs = os; cs 2128 drivers/block/drbd/drbd_state.c if (cs.role != os.role) cs 2131 drivers/block/drbd/drbd_state.c if (cs.peer != os.peer) cs 2134 drivers/block/drbd/drbd_state.c if (cs.conn != os.conn) cs 2137 drivers/block/drbd/drbd_state.c if (cs.disk != os.disk) cs 2140 drivers/block/drbd/drbd_state.c if (cs.pdsk != os.pdsk) cs 2147 drivers/block/drbd/drbd_state.c (*pcs).i = cs.i; cs 557 drivers/block/paride/pcd.c static void pcd_sleep(int cs) cs 559 drivers/block/paride/pcd.c schedule_timeout_interruptible(cs); cs 549 drivers/block/paride/pf.c static void pf_sleep(int cs) cs 551 drivers/block/paride/pf.c schedule_timeout_interruptible(cs); cs 283 drivers/block/paride/pg.c static void pg_sleep(int cs) cs 285 drivers/block/paride/pg.c schedule_timeout_interruptible(cs); cs 389 drivers/block/paride/pt.c static void pt_sleep(int cs) cs 391 drivers/block/paride/pt.c schedule_timeout_interruptible(cs); cs 69 drivers/bluetooth/btmtkuart.c u8 cs; cs 887 drivers/bluetooth/btmtkuart.c shdr->cs = 0; /* MT7622 doesn't care about checksum value */ cs 62 drivers/bus/imx-weim.c struct cs_timing cs[MAX_CS_COUNT]; cs 93 drivers/bus/imx-weim.c int cs = 0; cs 104 drivers/bus/imx-weim.c cs = val; cs 107 drivers/bus/imx-weim.c gprval |= val << cs * 3; cs 169 drivers/bus/imx-weim.c cst = &ts->cs[cs_idx]; cs 654 drivers/bus/mvebu-mbus.c w = &mvebu_mbus_dram_info.cs[i]; cs 682 drivers/bus/mvebu-mbus.c w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++]; cs 699 drivers/bus/mvebu-mbus.c int cs; cs 703 drivers/bus/mvebu-mbus.c for (i = 0, cs = 0; i < 4; i++) { cs 717 drivers/bus/mvebu-mbus.c w = &mvebu_mbus_dram_info.cs[cs++]; cs 726 drivers/bus/mvebu-mbus.c mvebu_mbus_dram_info.num_cs = cs; cs 755 drivers/bus/mvebu-mbus.c int cs; cs 759 drivers/bus/mvebu-mbus.c for (i = 0, cs = 0; i < 2; i++) { cs 768 drivers/bus/mvebu-mbus.c w = &mvebu_mbus_dram_info.cs[cs++]; cs 778 drivers/bus/mvebu-mbus.c mvebu_mbus_dram_info.num_cs = cs; cs 965 drivers/bus/mvebu-mbus.c const struct mbus_dram_window *cs = dram->cs + i; cs 967 drivers/bus/mvebu-mbus.c if (cs->base <= phyaddr && cs 968 drivers/bus/mvebu-mbus.c phyaddr <= (cs->base + cs->size - 1)) { cs 970 drivers/bus/mvebu-mbus.c *attr = cs->mbus_attr; cs 70 drivers/char/hpet.c static u64 read_hpet(struct clocksource *cs) cs 60 drivers/clocksource/acpi_pm.c static u64 acpi_pm_read(struct clocksource *cs) cs 83 drivers/clocksource/acpi_pm.c static u64 acpi_pm_read_slow(struct clocksource *cs) cs 58 drivers/clocksource/arc_timer.c static u64 arc_read_gfrc(struct clocksource *cs) cs 128 drivers/clocksource/arc_timer.c static u64 arc_read_rtc(struct clocksource *cs) cs 196 drivers/clocksource/arc_timer.c static u64 arc_read_timer1(struct clocksource *cs) cs 178 
drivers/clocksource/arm_arch_timer.c static u64 arch_counter_read(struct clocksource *cs) cs 195 drivers/clocksource/arm_global_timer.c static u64 gt_clocksource_read(struct clocksource *cs) cs 200 drivers/clocksource/arm_global_timer.c static void gt_resume(struct clocksource *cs) cs 30 drivers/clocksource/clksrc-dbx500-prcmu.c static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) cs 44 drivers/clocksource/dw_apb_timer.c clocksource_to_dw_apb_clocksource(struct clocksource *cs) cs 46 drivers/clocksource/dw_apb_timer.c return container_of(cs, struct dw_apb_clocksource, cs); cs 350 drivers/clocksource/dw_apb_timer.c static u64 __apbt_read_clocksource(struct clocksource *cs) cs 354 drivers/clocksource/dw_apb_timer.c clocksource_to_dw_apb_clocksource(cs); cs 362 drivers/clocksource/dw_apb_timer.c static void apbt_restart_clocksource(struct clocksource *cs) cs 365 drivers/clocksource/dw_apb_timer.c clocksource_to_dw_apb_clocksource(cs); cs 393 drivers/clocksource/dw_apb_timer.c dw_cs->cs.name = name; cs 394 drivers/clocksource/dw_apb_timer.c dw_cs->cs.rating = rating; cs 395 drivers/clocksource/dw_apb_timer.c dw_cs->cs.read = __apbt_read_clocksource; cs 396 drivers/clocksource/dw_apb_timer.c dw_cs->cs.mask = CLOCKSOURCE_MASK(32); cs 397 drivers/clocksource/dw_apb_timer.c dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; cs 398 drivers/clocksource/dw_apb_timer.c dw_cs->cs.resume = apbt_restart_clocksource; cs 410 drivers/clocksource/dw_apb_timer.c clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq); cs 90 drivers/clocksource/dw_apb_timer_of.c struct dw_apb_clocksource *cs; cs 95 drivers/clocksource/dw_apb_timer_of.c cs = dw_apb_clocksource_init(300, source_timer->name, iobase, rate); cs 96 drivers/clocksource/dw_apb_timer_of.c if (!cs) cs 99 drivers/clocksource/dw_apb_timer_of.c dw_apb_clocksource_start(cs); cs 100 drivers/clocksource/dw_apb_timer_of.c dw_apb_clocksource_register(cs); cs 33 drivers/clocksource/em_sti.c struct clocksource cs; cs 181 drivers/clocksource/em_sti.c static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs) cs 183 drivers/clocksource/em_sti.c return container_of(cs, struct em_sti_priv, cs); cs 186 drivers/clocksource/em_sti.c static u64 em_sti_clocksource_read(struct clocksource *cs) cs 188 drivers/clocksource/em_sti.c return em_sti_count(cs_to_em_sti(cs)); cs 191 drivers/clocksource/em_sti.c static int em_sti_clocksource_enable(struct clocksource *cs) cs 193 drivers/clocksource/em_sti.c struct em_sti_priv *p = cs_to_em_sti(cs); cs 198 drivers/clocksource/em_sti.c static void em_sti_clocksource_disable(struct clocksource *cs) cs 200 drivers/clocksource/em_sti.c em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE); cs 203 drivers/clocksource/em_sti.c static void em_sti_clocksource_resume(struct clocksource *cs) cs 205 drivers/clocksource/em_sti.c em_sti_clocksource_enable(cs); cs 210 drivers/clocksource/em_sti.c struct clocksource *cs = &p->cs; cs 212 drivers/clocksource/em_sti.c cs->name = dev_name(&p->pdev->dev); cs 213 drivers/clocksource/em_sti.c cs->rating = 200; cs 214 drivers/clocksource/em_sti.c cs->read = em_sti_clocksource_read; cs 215 drivers/clocksource/em_sti.c cs->enable = em_sti_clocksource_enable; cs 216 drivers/clocksource/em_sti.c cs->disable = em_sti_clocksource_disable; cs 217 drivers/clocksource/em_sti.c cs->suspend = em_sti_clocksource_disable; cs 218 drivers/clocksource/em_sti.c cs->resume = em_sti_clocksource_resume; cs 219 drivers/clocksource/em_sti.c cs->mask = CLOCKSOURCE_MASK(48); cs 220 drivers/clocksource/em_sti.c cs->flags = 
CLOCK_SOURCE_IS_CONTINUOUS; cs 224 drivers/clocksource/em_sti.c clocksource_register_hz(cs, p->rate); cs 197 drivers/clocksource/exynos_mct.c static u64 exynos4_frc_read(struct clocksource *cs) cs 202 drivers/clocksource/exynos_mct.c static void exynos4_frc_resume(struct clocksource *cs) cs 27 drivers/clocksource/h8300_timer16.c struct clocksource cs; cs 71 drivers/clocksource/h8300_timer16.c static inline struct timer16_priv *cs_to_priv(struct clocksource *cs) cs 73 drivers/clocksource/h8300_timer16.c return container_of(cs, struct timer16_priv, cs); cs 76 drivers/clocksource/h8300_timer16.c static u64 timer16_clocksource_read(struct clocksource *cs) cs 78 drivers/clocksource/h8300_timer16.c struct timer16_priv *p = cs_to_priv(cs); cs 87 drivers/clocksource/h8300_timer16.c static int timer16_enable(struct clocksource *cs) cs 89 drivers/clocksource/h8300_timer16.c struct timer16_priv *p = cs_to_priv(cs); cs 103 drivers/clocksource/h8300_timer16.c static void timer16_disable(struct clocksource *cs) cs 105 drivers/clocksource/h8300_timer16.c struct timer16_priv *p = cs_to_priv(cs); cs 116 drivers/clocksource/h8300_timer16.c .cs = { cs 172 drivers/clocksource/h8300_timer16.c IRQF_TIMER, timer16_priv.cs.name, &timer16_priv); cs 178 drivers/clocksource/h8300_timer16.c clocksource_register_hz(&timer16_priv.cs, cs 26 drivers/clocksource/h8300_tpu.c struct clocksource cs; cs 63 drivers/clocksource/h8300_tpu.c static inline struct tpu_priv *cs_to_priv(struct clocksource *cs) cs 65 drivers/clocksource/h8300_tpu.c return container_of(cs, struct tpu_priv, cs); cs 68 drivers/clocksource/h8300_tpu.c static u64 tpu_clocksource_read(struct clocksource *cs) cs 70 drivers/clocksource/h8300_tpu.c struct tpu_priv *p = cs_to_priv(cs); cs 82 drivers/clocksource/h8300_tpu.c static int tpu_clocksource_enable(struct clocksource *cs) cs 84 drivers/clocksource/h8300_tpu.c struct tpu_priv *p = cs_to_priv(cs); cs 97 drivers/clocksource/h8300_tpu.c static void tpu_clocksource_disable(struct clocksource *cs) cs 99 drivers/clocksource/h8300_tpu.c struct tpu_priv *p = cs_to_priv(cs); cs 109 drivers/clocksource/h8300_tpu.c .cs = { cs 149 drivers/clocksource/h8300_tpu.c return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64); cs 36 drivers/clocksource/i8253.c static u64 i8253_read(struct clocksource *cs) cs 33 drivers/clocksource/ingenic-timer.c struct clocksource cs; cs 50 drivers/clocksource/ingenic-timer.c static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs) cs 173 drivers/clocksource/ingenic-timer.c struct clocksource *cs = &tcu->cs; cs 202 drivers/clocksource/ingenic-timer.c cs->name = "ingenic-timer"; cs 203 drivers/clocksource/ingenic-timer.c cs->rating = 200; cs 204 drivers/clocksource/ingenic-timer.c cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; cs 205 drivers/clocksource/ingenic-timer.c cs->mask = CLOCKSOURCE_MASK(16); cs 206 drivers/clocksource/ingenic-timer.c cs->read = ingenic_tcu_timer_cs_read; cs 208 drivers/clocksource/ingenic-timer.c err = clocksource_register_hz(cs, rate); cs 294 drivers/clocksource/ingenic-timer.c clocksource_unregister(&tcu->cs); cs 60 drivers/clocksource/jcore-pit.c static u64 jcore_clocksource_read(struct clocksource *cs) cs 152 drivers/clocksource/mips-gic-timer.c static u64 gic_hpt_read(struct clocksource *cs) cs 53 drivers/clocksource/mmio.c struct clocksource_mmio *cs; cs 58 drivers/clocksource/mmio.c cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL); cs 59 drivers/clocksource/mmio.c if (!cs) cs 62 drivers/clocksource/mmio.c cs->reg = base; cs 63 
drivers/clocksource/mmio.c cs->clksrc.name = name; cs 64 drivers/clocksource/mmio.c cs->clksrc.rating = rating; cs 65 drivers/clocksource/mmio.c cs->clksrc.read = read; cs 66 drivers/clocksource/mmio.c cs->clksrc.mask = CLOCKSOURCE_MASK(bits); cs 67 drivers/clocksource/mmio.c cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; cs 69 drivers/clocksource/mmio.c return clocksource_register_hz(&cs->clksrc, hz); cs 86 drivers/clocksource/mxs_timer.c static u64 timrotv1_get_cycles(struct clocksource *cs) cs 15 drivers/clocksource/numachip.c static cycles_t numachip2_timer_read(struct clocksource *cs) cs 293 drivers/clocksource/samsung_pwm_timer.c static void samsung_clocksource_suspend(struct clocksource *cs) cs 298 drivers/clocksource/samsung_pwm_timer.c static void samsung_clocksource_resume(struct clocksource *cs) cs 42 drivers/clocksource/scx200_hrt.c static u64 read_hrt(struct clocksource *cs) cs 102 drivers/clocksource/sh_cmt.c struct clocksource cs; cs 599 drivers/clocksource/sh_cmt.c static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs) cs 601 drivers/clocksource/sh_cmt.c return container_of(cs, struct sh_cmt_channel, cs); cs 604 drivers/clocksource/sh_cmt.c static u64 sh_cmt_clocksource_read(struct clocksource *cs) cs 606 drivers/clocksource/sh_cmt.c struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); cs 623 drivers/clocksource/sh_cmt.c static int sh_cmt_clocksource_enable(struct clocksource *cs) cs 626 drivers/clocksource/sh_cmt.c struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); cs 639 drivers/clocksource/sh_cmt.c static void sh_cmt_clocksource_disable(struct clocksource *cs) cs 641 drivers/clocksource/sh_cmt.c struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); cs 649 drivers/clocksource/sh_cmt.c static void sh_cmt_clocksource_suspend(struct clocksource *cs) cs 651 drivers/clocksource/sh_cmt.c struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); cs 660 drivers/clocksource/sh_cmt.c static void sh_cmt_clocksource_resume(struct clocksource *cs) cs 662 drivers/clocksource/sh_cmt.c struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); cs 674 drivers/clocksource/sh_cmt.c struct clocksource *cs = &ch->cs; cs 676 drivers/clocksource/sh_cmt.c cs->name = name; cs 677 drivers/clocksource/sh_cmt.c cs->rating = 125; cs 678 drivers/clocksource/sh_cmt.c cs->read = sh_cmt_clocksource_read; cs 679 drivers/clocksource/sh_cmt.c cs->enable = sh_cmt_clocksource_enable; cs 680 drivers/clocksource/sh_cmt.c cs->disable = sh_cmt_clocksource_disable; cs 681 drivers/clocksource/sh_cmt.c cs->suspend = sh_cmt_clocksource_suspend; cs 682 drivers/clocksource/sh_cmt.c cs->resume = sh_cmt_clocksource_resume; cs 683 drivers/clocksource/sh_cmt.c cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8); cs 684 drivers/clocksource/sh_cmt.c cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; cs 689 drivers/clocksource/sh_cmt.c clocksource_register_hz(cs, ch->cmt->rate); cs 43 drivers/clocksource/sh_tmu.c struct clocksource cs; cs 244 drivers/clocksource/sh_tmu.c static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs) cs 246 drivers/clocksource/sh_tmu.c return container_of(cs, struct sh_tmu_channel, cs); cs 249 drivers/clocksource/sh_tmu.c static u64 sh_tmu_clocksource_read(struct clocksource *cs) cs 251 drivers/clocksource/sh_tmu.c struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); cs 256 drivers/clocksource/sh_tmu.c static int sh_tmu_clocksource_enable(struct clocksource *cs) cs 258 drivers/clocksource/sh_tmu.c struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); cs 271 drivers/clocksource/sh_tmu.c static void sh_tmu_clocksource_disable(struct clocksource *cs) cs 273 
drivers/clocksource/sh_tmu.c struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); cs 282 drivers/clocksource/sh_tmu.c static void sh_tmu_clocksource_suspend(struct clocksource *cs) cs 284 drivers/clocksource/sh_tmu.c struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); cs 295 drivers/clocksource/sh_tmu.c static void sh_tmu_clocksource_resume(struct clocksource *cs) cs 297 drivers/clocksource/sh_tmu.c struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); cs 311 drivers/clocksource/sh_tmu.c struct clocksource *cs = &ch->cs; cs 313 drivers/clocksource/sh_tmu.c cs->name = name; cs 314 drivers/clocksource/sh_tmu.c cs->rating = 200; cs 315 drivers/clocksource/sh_tmu.c cs->read = sh_tmu_clocksource_read; cs 316 drivers/clocksource/sh_tmu.c cs->enable = sh_tmu_clocksource_enable; cs 317 drivers/clocksource/sh_tmu.c cs->disable = sh_tmu_clocksource_disable; cs 318 drivers/clocksource/sh_tmu.c cs->suspend = sh_tmu_clocksource_suspend; cs 319 drivers/clocksource/sh_tmu.c cs->resume = sh_tmu_clocksource_resume; cs 320 drivers/clocksource/sh_tmu.c cs->mask = CLOCKSOURCE_MASK(32); cs 321 drivers/clocksource/sh_tmu.c cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; cs 326 drivers/clocksource/sh_tmu.c clocksource_register_hz(cs, ch->tmu->rate); cs 87 drivers/clocksource/timer-atlas7.c static u64 sirfsoc_timer_read(struct clocksource *cs) cs 126 drivers/clocksource/timer-atlas7.c static void sirfsoc_clocksource_suspend(struct clocksource *cs) cs 134 drivers/clocksource/timer-atlas7.c static void sirfsoc_clocksource_resume(struct clocksource *cs) cs 73 drivers/clocksource/timer-atmel-pit.c static u64 read_pit_clk(struct clocksource *cs) cs 75 drivers/clocksource/timer-atmel-pit.c struct pit_data *data = clksrc_to_pit_data(cs); cs 82 drivers/clocksource/timer-atmel-st.c static u64 read_clk32k(struct clocksource *cs) cs 50 drivers/clocksource/timer-atmel-tcb.c static u64 tc_get_cycles(struct clocksource *cs) cs 65 drivers/clocksource/timer-atmel-tcb.c static u64 tc_get_cycles32(struct clocksource *cs) cs 70 drivers/clocksource/timer-atmel-tcb.c static void tc_clksrc_suspend(struct clocksource *cs) cs 85 drivers/clocksource/timer-atmel-tcb.c static void tc_clksrc_resume(struct clocksource *cs) cs 85 drivers/clocksource/timer-cadence-ttc.c struct clocksource cs; cs 89 drivers/clocksource/timer-cadence-ttc.c container_of(x, struct ttc_timer_clocksource, cs) cs 154 drivers/clocksource/timer-cadence-ttc.c static u64 __ttc_clocksource_read(struct clocksource *cs) cs 156 drivers/clocksource/timer-cadence-ttc.c struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; cs 348 drivers/clocksource/timer-cadence-ttc.c ttccs->cs.name = "ttc_clocksource"; cs 349 drivers/clocksource/timer-cadence-ttc.c ttccs->cs.rating = 200; cs 350 drivers/clocksource/timer-cadence-ttc.c ttccs->cs.read = __ttc_clocksource_read; cs 351 drivers/clocksource/timer-cadence-ttc.c ttccs->cs.mask = CLOCKSOURCE_MASK(timer_width); cs 352 drivers/clocksource/timer-cadence-ttc.c ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; cs 365 drivers/clocksource/timer-cadence-ttc.c err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE); cs 51 drivers/clocksource/timer-pistachio.c struct clocksource cs; cs 56 drivers/clocksource/timer-pistachio.c #define to_pistachio_clocksource(cs) \ cs 57 drivers/clocksource/timer-pistachio.c container_of(cs, struct pistachio_clocksource, cs) cs 71 drivers/clocksource/timer-pistachio.c pistachio_clocksource_read_cycles(struct clocksource *cs) cs 73 drivers/clocksource/timer-pistachio.c struct pistachio_clocksource *pcs = 
cs 92 drivers/clocksource/timer-pistachio.c return pistachio_clocksource_read_cycles(&pcs_gpt.cs); cs 95 drivers/clocksource/timer-pistachio.c static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx, cs 98 drivers/clocksource/timer-pistachio.c struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); cs 110 drivers/clocksource/timer-pistachio.c static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx) cs 112 drivers/clocksource/timer-pistachio.c struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); cs 115 drivers/clocksource/timer-pistachio.c pistachio_clksrc_set_mode(cs, timeridx, false); cs 117 drivers/clocksource/timer-pistachio.c pistachio_clksrc_set_mode(cs, timeridx, true); cs 120 drivers/clocksource/timer-pistachio.c static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx) cs 123 drivers/clocksource/timer-pistachio.c pistachio_clksrc_set_mode(cs, timeridx, false); cs 126 drivers/clocksource/timer-pistachio.c static int pistachio_clocksource_enable(struct clocksource *cs) cs 128 drivers/clocksource/timer-pistachio.c pistachio_clksrc_enable(cs, 0); cs 132 drivers/clocksource/timer-pistachio.c static void pistachio_clocksource_disable(struct clocksource *cs) cs 134 drivers/clocksource/timer-pistachio.c pistachio_clksrc_disable(cs, 0); cs 139 drivers/clocksource/timer-pistachio.c .cs = { cs 215 drivers/clocksource/timer-pistachio.c return clocksource_register_hz(&pcs_gpt.cs, rate); cs 74 drivers/clocksource/timer-prima2.c static u64 notrace sirfsoc_timer_read(struct clocksource *cs) cs 122 drivers/clocksource/timer-prima2.c static void sirfsoc_clocksource_suspend(struct clocksource *cs) cs 135 drivers/clocksource/timer-prima2.c static void sirfsoc_clocksource_resume(struct clocksource *cs) cs 83 drivers/clocksource/timer-qcom.c static notrace u64 msm_read_timer_count(struct clocksource *cs) cs 155 drivers/clocksource/timer-qcom.c struct clocksource *cs = &msm_clocksource; cs 187 drivers/clocksource/timer-qcom.c res = clocksource_register_hz(cs, dgt_hz); cs 156 drivers/clocksource/timer-rda.c static u64 rda_hwtimer_read(struct clocksource *cs) cs 39 drivers/clocksource/timer-riscv.c static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs) cs 163 drivers/clocksource/timer-sprd.c static u64 sprd_suspend_timer_read(struct clocksource *cs) cs 166 drivers/clocksource/timer-sprd.c TIMER_VALUE_SHDW_LO) & cs->mask; cs 169 drivers/clocksource/timer-sprd.c static int sprd_suspend_timer_enable(struct clocksource *cs) cs 178 drivers/clocksource/timer-sprd.c static void sprd_suspend_timer_disable(struct clocksource *cs) cs 158 drivers/clocksource/timer-sun5i.c struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc); cs 160 drivers/clocksource/timer-sun5i.c return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1)); cs 168 drivers/clocksource/timer-sun5i.c struct sun5i_timer_clksrc *cs = container_of(timer, struct sun5i_timer_clksrc, timer); cs 172 drivers/clocksource/timer-sun5i.c clocksource_unregister(&cs->clksrc); cs 176 drivers/clocksource/timer-sun5i.c clocksource_register_hz(&cs->clksrc, ndata->new_rate); cs 190 drivers/clocksource/timer-sun5i.c struct sun5i_timer_clksrc *cs; cs 194 drivers/clocksource/timer-sun5i.c cs = kzalloc(sizeof(*cs), GFP_KERNEL); cs 195 drivers/clocksource/timer-sun5i.c if (!cs) cs 211 drivers/clocksource/timer-sun5i.c cs->timer.base = base; cs 212 drivers/clocksource/timer-sun5i.c cs->timer.clk = clk; cs 213 drivers/clocksource/timer-sun5i.c 
cs->timer.clk_rate_cb.notifier_call = sun5i_rate_cb_clksrc; cs 214 drivers/clocksource/timer-sun5i.c cs->timer.clk_rate_cb.next = NULL; cs 216 drivers/clocksource/timer-sun5i.c ret = clk_notifier_register(clk, &cs->timer.clk_rate_cb); cs 226 drivers/clocksource/timer-sun5i.c cs->clksrc.name = node->name; cs 227 drivers/clocksource/timer-sun5i.c cs->clksrc.rating = 340; cs 228 drivers/clocksource/timer-sun5i.c cs->clksrc.read = sun5i_clksrc_read; cs 229 drivers/clocksource/timer-sun5i.c cs->clksrc.mask = CLOCKSOURCE_MASK(32); cs 230 drivers/clocksource/timer-sun5i.c cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; cs 232 drivers/clocksource/timer-sun5i.c ret = clocksource_register_hz(&cs->clksrc, rate); cs 241 drivers/clocksource/timer-sun5i.c clk_notifier_unregister(clk, &cs->timer.clk_rate_cb); cs 245 drivers/clocksource/timer-sun5i.c kfree(cs); cs 194 drivers/clocksource/timer-tegra.c static u64 tegra_rtc_read_ms(struct clocksource *cs) cs 49 drivers/clocksource/timer-ti-32k.c struct clocksource cs; cs 52 drivers/clocksource/timer-ti-32k.c static inline struct ti_32k *to_ti_32k(struct clocksource *cs) cs 54 drivers/clocksource/timer-ti-32k.c return container_of(cs, struct ti_32k, cs); cs 57 drivers/clocksource/timer-ti-32k.c static u64 notrace ti_32k_read_cycles(struct clocksource *cs) cs 59 drivers/clocksource/timer-ti-32k.c struct ti_32k *ti = to_ti_32k(cs); cs 65 drivers/clocksource/timer-ti-32k.c .cs = { cs 76 drivers/clocksource/timer-ti-32k.c return ti_32k_read_cycles(&ti_32k_timer.cs); cs 90 drivers/clocksource/timer-ti-32k.c ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; cs 107 drivers/clocksource/timer-ti-32k.c ret = clocksource_register_hz(&ti_32k_timer.cs, 32768); cs 43 drivers/clocksource/timer-vt8500.c static u64 vt8500_timer_read(struct clocksource *cs) cs 248 drivers/cpufreq/cpufreq_conservative.c gov_show_one(cs, down_threshold); cs 249 drivers/cpufreq/cpufreq_conservative.c gov_show_one(cs, freq_step); cs 327 drivers/crypto/marvell/cesa.c const struct mbus_dram_window *cs = dram->cs + i; cs 329 drivers/crypto/marvell/cesa.c writel(((cs->size - 1) & 0xffff0000) | cs 330 drivers/crypto/marvell/cesa.c (cs->mbus_attr << 8) | cs 333 drivers/crypto/marvell/cesa.c writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i)); cs 170 drivers/crypto/nx/nx-842-powernv.c (csb)->cs, (csb)->cc, (csb)->ce, \ cs 212 drivers/crypto/nx/nx-842-powernv.c if (csb->cs) { cs 286 drivers/dma/imx-sdma.c u32 cs; cs 1178 drivers/dma/mv_xor.c const struct mbus_dram_window *cs = dram->cs + i; cs 1180 drivers/dma/mv_xor.c writel((cs->base & 0xffff0000) | cs 1181 drivers/dma/mv_xor.c (cs->mbus_attr << 8) | cs 1183 drivers/dma/mv_xor.c writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); cs 1186 drivers/dma/mv_xor.c xordev->win_start[i] = cs->base; cs 1187 drivers/dma/mv_xor.c xordev->win_end[i] = cs->base + cs->size - 1; cs 961 drivers/edac/amd64_edac.c int cs, umc; cs 967 drivers/edac/amd64_edac.c for_each_chip_select(cs, umc, pvt) { cs 968 drivers/edac/amd64_edac.c base = &pvt->csels[umc].csbases[cs]; cs 969 drivers/edac/amd64_edac.c base_sec = &pvt->csels[umc].csbases_sec[cs]; cs 971 drivers/edac/amd64_edac.c base_reg = umc_base_reg + (cs * 4); cs 972 drivers/edac/amd64_edac.c base_reg_sec = umc_base_reg_sec + (cs * 4); cs 976 drivers/edac/amd64_edac.c umc, cs, *base, base_reg); cs 980 drivers/edac/amd64_edac.c umc, cs, *base_sec, base_reg_sec); cs 986 drivers/edac/amd64_edac.c for_each_chip_select_mask(cs, umc, pvt) { cs 987 drivers/edac/amd64_edac.c mask = &pvt->csels[umc].csmasks[cs];
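
The cesa and mv_xor entries above repeat one idiom: walk mv_mbus_dram_info() and program each DRAM chip select into a device address window as a base/attribute pair plus a size mask. A sketch under those assumptions; the WIN_BASE/WIN_SIZE offsets are hypothetical placeholders for a device's real window registers:

#include <linux/io.h>
#include <linux/mbus.h>

#define WIN_BASE(i)	(0x100 + (i) * 8)	/* hypothetical register layout */
#define WIN_SIZE(i)	(0x104 + (i) * 8)

static void setup_mbus_windows(void __iomem *regs)
{
	const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
	int i;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		/* base | attribute | target id, as in the mv_xor entries */
		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id,
		       regs + WIN_BASE(i));
		writel((cs->size - 1) & 0xffff0000, regs + WIN_SIZE(i));
	}
}
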
cs 988 drivers/edac/amd64_edac.c mask_sec = &pvt->csels[umc].csmasks_sec[cs]; cs 990 drivers/edac/amd64_edac.c mask_reg = umc_mask_reg + (cs * 4); cs 991 drivers/edac/amd64_edac.c mask_reg_sec = umc_mask_reg_sec + (cs * 4); cs 995 drivers/edac/amd64_edac.c umc, cs, *mask, mask_reg); cs 999 drivers/edac/amd64_edac.c umc, cs, *mask_sec, mask_reg_sec); cs 1009 drivers/edac/amd64_edac.c int cs; cs 1016 drivers/edac/amd64_edac.c for_each_chip_select(cs, 0, pvt) { cs 1017 drivers/edac/amd64_edac.c int reg0 = DCSB0 + (cs * 4); cs 1018 drivers/edac/amd64_edac.c int reg1 = DCSB1 + (cs * 4); cs 1019 drivers/edac/amd64_edac.c u32 *base0 = &pvt->csels[0].csbases[cs]; cs 1020 drivers/edac/amd64_edac.c u32 *base1 = &pvt->csels[1].csbases[cs]; cs 1024 drivers/edac/amd64_edac.c cs, *base0, reg0); cs 1031 drivers/edac/amd64_edac.c cs, *base1, (pvt->fam == 0x10) ? reg1 cs 1035 drivers/edac/amd64_edac.c for_each_chip_select_mask(cs, 0, pvt) { cs 1036 drivers/edac/amd64_edac.c int reg0 = DCSM0 + (cs * 4); cs 1037 drivers/edac/amd64_edac.c int reg1 = DCSM1 + (cs * 4); cs 1038 drivers/edac/amd64_edac.c u32 *mask0 = &pvt->csels[0].csmasks[cs]; cs 1039 drivers/edac/amd64_edac.c u32 *mask1 = &pvt->csels[1].csmasks[cs]; cs 1043 drivers/edac/amd64_edac.c cs, *mask0, reg0); cs 1050 drivers/edac/amd64_edac.c cs, *mask1, (pvt->fam == 0x10) ? reg1 cs 2909 drivers/edac/amd64_edac.c u8 umc, cs; cs 2925 drivers/edac/amd64_edac.c for_each_chip_select(cs, umc, pvt) { cs 2926 drivers/edac/amd64_edac.c if (!csrow_enabled(cs, umc, pvt)) cs 2930 drivers/edac/amd64_edac.c dimm = mci->csrows[cs]->channels[umc]->dimm; cs 2933 drivers/edac/amd64_edac.c pvt->mc_node_id, cs); cs 2935 drivers/edac/amd64_edac.c dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs); cs 27 drivers/edac/armada_xp_edac.c #define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20+cs) cs 28 drivers/edac/armada_xp_edac.c #define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs) (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs)) cs 29 drivers/edac/armada_xp_edac.c #define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs) BIT(16+cs) cs 30 drivers/edac/armada_xp_edac.c #define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs) (cs*4+2) cs 31 drivers/edac/armada_xp_edac.c #define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs)) cs 32 drivers/edac/armada_xp_edac.c #define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs) (cs*4) cs 33 drivers/edac/armada_xp_edac.c #define SDRAM_ADDR_CTRL_STRUCT_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs)) cs 69 drivers/edac/armada_xp_edac.c #define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs) cs 83 drivers/edac/armada_xp_edac.c uint8_t cs, uint8_t bank, uint16_t row, cs 88 drivers/edac/armada_xp_edac.c if (drvdata->cs_addr_sel[cs]) cs 100 drivers/edac/armada_xp_edac.c if (drvdata->cs_addr_sel[cs]) cs 112 drivers/edac/armada_xp_edac.c if (drvdata->cs_addr_sel[cs]) cs 88 drivers/edac/pasemi_edac.c u32 cs; cs 95 drivers/edac/pasemi_edac.c cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >> cs 102 drivers/edac/pasemi_edac.c mci->csrows[cs]->first_page, 0, 0, cs 103 drivers/edac/pasemi_edac.c cs, 0, -1, mci->ctl_name, ""); cs 109 drivers/edac/pasemi_edac.c mci->csrows[cs]->first_page, 0, 0, cs 110 drivers/edac/pasemi_edac.c cs, 0, -1, mci->ctl_name, ""); cs 43 drivers/firmware/efi/libstub/string.c int strncmp(const char *cs, const char *ct, size_t count) cs 48 drivers/firmware/efi/libstub/string.c c1 = *cs++; cs 193 drivers/gpio/gpiolib-of.c u32 cs; cs 197 drivers/gpio/gpiolib-of.c ret = of_property_read_u32(child, "reg", &cs); cs 200 drivers/gpio/gpiolib-of.c if (cs == index) {
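
The amd64_edac entries above read per-chip-select base and mask registers at a fixed 4-byte stride (base_reg = umc_base_reg + cs * 4, and likewise DCSB0/DCSM0 on older families). A sketch of just that indexing; read_umc_reg() and the two block offsets are hypothetical stand-ins, not the driver's real accessors:

/* hypothetical register blocks, one 4-byte register per chip select */
#define UMC_BASE_REG	0x00
#define UMC_MASK_REG	0x80

static u32 read_umc_reg(u32 offset);	/* hypothetical accessor */

static void read_chip_select_regs(u32 *bases, u32 *masks, int num_cs)
{
	int cs;

	for (cs = 0; cs < num_cs; cs++) {
		bases[cs] = read_umc_reg(UMC_BASE_REG + cs * 4);
		masks[cs] = read_umc_reg(UMC_MASK_REG + cs * 4);
	}
}
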
cs 105 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) cs 116 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c if (cs->in.num_chunks == 0) cs 119 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); cs 123 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); cs 138 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c chunk_array_user = u64_to_user_ptr(cs->in.chunks); cs 140 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c sizeof(uint64_t)*cs->in.num_chunks)) { cs 145 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c p->nchunks = cs->in.num_chunks; cs 571 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c union drm_amdgpu_cs *cs) cs 585 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c if (cs->in.bo_list_handle) { cs 589 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle, cs 1272 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c union drm_amdgpu_cs *cs) cs 1321 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c cs->out.handle = seq; cs 1353 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c union drm_amdgpu_cs *cs = data; cs 1398 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c r = amdgpu_cs_submit(&parser, cs); cs 566 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c struct clock_source *cs, cs 570 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs); cs 583 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c if (cs->id == CLOCK_SOURCE_ID_DP_DTO || cs 584 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c cs->id == CLOCK_SOURCE_ID_EXTERNAL) { cs 599 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c struct clock_source *cs, cs 603 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs); cs 615 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c if (cs->id == CLOCK_SOURCE_ID_DP_DTO || cs 616 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c cs->id == CLOCK_SOURCE_ID_EXTERNAL) { cs 252 drivers/gpu/drm/arm/malidp_crtc.c struct malidp_crtc_state *cs = to_malidp_crtc_state(state); cs 253 drivers/gpu/drm/arm/malidp_crtc.c struct malidp_se_config *s = &cs->scaler_config; cs 259 drivers/gpu/drm/arm/malidp_crtc.c u8 scaling = cs->scaled_planes_mask; cs 120 drivers/gpu/drm/arm/malidp_drv.c struct malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state); cs 124 drivers/gpu/drm/arm/malidp_drv.c struct malidp_se_config *s = &cs->scaler_config; cs 132 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c u8 cs[192][8]; cs 190 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c u8 *cs; cs 192 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c cs = dw->cs[dw->iec_offset++]; cs 201 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c sample |= *cs++ << 24; cs 212 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c u8 cs[4]; cs 215 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c snd_pcm_create_iec958_consumer(runtime, cs, sizeof(cs)); cs 217 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c memset(dw->cs, 0, sizeof(dw->cs)); cs 220 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c cs[2] &= ~IEC958_AES2_CON_CHANNEL; cs 221 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c cs[2] |= (ch + 1) << 4; cs 223 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c for (i = 0; i < ARRAY_SIZE(cs); i++) { cs 224 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c unsigned c = cs[i]; cs 227 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c dw->cs[i * 8 + j][ch] = (c & 1) << 2; cs 230 
drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c dw->cs[0][0] |= BIT(4); cs 247 drivers/gpu/drm/i915/display/intel_overlay.c u32 *cs; cs 255 drivers/gpu/drm/i915/display/intel_overlay.c cs = intel_ring_begin(rq, 4); cs 256 drivers/gpu/drm/i915/display/intel_overlay.c if (IS_ERR(cs)) { cs 258 drivers/gpu/drm/i915/display/intel_overlay.c return PTR_ERR(cs); cs 266 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON; cs 267 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = overlay->flip_addr | OFC_UPDATE; cs 268 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; cs 269 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_NOOP; cs 270 drivers/gpu/drm/i915/display/intel_overlay.c intel_ring_advance(rq, cs); cs 306 drivers/gpu/drm/i915/display/intel_overlay.c u32 tmp, *cs; cs 322 drivers/gpu/drm/i915/display/intel_overlay.c cs = intel_ring_begin(rq, 2); cs 323 drivers/gpu/drm/i915/display/intel_overlay.c if (IS_ERR(cs)) { cs 325 drivers/gpu/drm/i915/display/intel_overlay.c return PTR_ERR(cs); cs 328 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE; cs 329 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = flip_addr; cs 330 drivers/gpu/drm/i915/display/intel_overlay.c intel_ring_advance(rq, cs); cs 387 drivers/gpu/drm/i915/display/intel_overlay.c u32 *cs, flip_addr = overlay->flip_addr; cs 401 drivers/gpu/drm/i915/display/intel_overlay.c cs = intel_ring_begin(rq, 6); cs 402 drivers/gpu/drm/i915/display/intel_overlay.c if (IS_ERR(cs)) { cs 404 drivers/gpu/drm/i915/display/intel_overlay.c return PTR_ERR(cs); cs 408 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE; cs 409 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = flip_addr; cs 410 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; cs 413 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF; cs 414 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = flip_addr; cs 415 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; cs 417 drivers/gpu/drm/i915/display/intel_overlay.c intel_ring_advance(rq, cs); cs 440 drivers/gpu/drm/i915/display/intel_overlay.c u32 *cs; cs 460 drivers/gpu/drm/i915/display/intel_overlay.c cs = intel_ring_begin(rq, 2); cs 461 drivers/gpu/drm/i915/display/intel_overlay.c if (IS_ERR(cs)) { cs 463 drivers/gpu/drm/i915/display/intel_overlay.c return PTR_ERR(cs); cs 466 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; cs 467 drivers/gpu/drm/i915/display/intel_overlay.c *cs++ = MI_NOOP; cs 468 drivers/gpu/drm/i915/display/intel_overlay.c intel_ring_advance(rq, cs); cs 989 drivers/gpu/drm/i915/gem/i915_gem_context.c u32 *cs; cs 996 drivers/gpu/drm/i915/gem/i915_gem_context.c cs = intel_ring_begin(rq, 6); cs 997 drivers/gpu/drm/i915/gem/i915_gem_context.c if (IS_ERR(cs)) cs 998 drivers/gpu/drm/i915/gem/i915_gem_context.c return PTR_ERR(cs); cs 1000 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = MI_LOAD_REGISTER_IMM(2); cs 1002 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); cs 1003 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = upper_32_bits(pd_daddr); cs 1004 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); cs 1005 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = 
lower_32_bits(pd_daddr); cs 1007 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = MI_NOOP; cs 1008 drivers/gpu/drm/i915/gem/i915_gem_context.c intel_ring_advance(rq, cs); cs 1012 drivers/gpu/drm/i915/gem/i915_gem_context.c cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); cs 1013 drivers/gpu/drm/i915/gem/i915_gem_context.c if (IS_ERR(cs)) cs 1014 drivers/gpu/drm/i915/gem/i915_gem_context.c return PTR_ERR(cs); cs 1016 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES); cs 1020 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); cs 1021 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = upper_32_bits(pd_daddr); cs 1022 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); cs 1023 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = lower_32_bits(pd_daddr); cs 1025 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = MI_NOOP; cs 1026 drivers/gpu/drm/i915/gem/i915_gem_context.c intel_ring_advance(rq, cs); cs 1112 drivers/gpu/drm/i915/gem/i915_gem_context.c u32 *cs; cs 1114 drivers/gpu/drm/i915/gem/i915_gem_context.c cs = intel_ring_begin(rq, 4); cs 1115 drivers/gpu/drm/i915/gem/i915_gem_context.c if (IS_ERR(cs)) cs 1116 drivers/gpu/drm/i915/gem/i915_gem_context.c return PTR_ERR(cs); cs 1122 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; cs 1123 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = lower_32_bits(offset); cs 1124 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = upper_32_bits(offset); cs 1125 drivers/gpu/drm/i915/gem/i915_gem_context.c *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu); cs 1127 drivers/gpu/drm/i915/gem/i915_gem_context.c intel_ring_advance(rq, cs); cs 1940 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c u32 *cs; cs 1948 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cs = intel_ring_begin(rq, 4 * 2 + 2); cs 1949 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (IS_ERR(cs)) cs 1950 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return PTR_ERR(cs); cs 1952 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c *cs++ = MI_LOAD_REGISTER_IMM(4); cs 1954 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i)); cs 1955 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c *cs++ = 0; cs 1957 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c *cs++ = MI_NOOP; cs 1958 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c intel_ring_advance(rq, cs); cs 186 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c u32 *cs; cs 205 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c cs = intel_ring_begin(rq, 4); cs 206 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c if (IS_ERR(cs)) { cs 209 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c return PTR_ERR(cs); cs 213 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; cs 214 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset); cs 215 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); cs 216 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = v; cs 218 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; cs 219 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = 0; cs 220 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = i915_ggtt_offset(vma) + offset; cs 221 
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = v; cs 223 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; cs 224 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = i915_ggtt_offset(vma) + offset; cs 225 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = v; cs 226 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *cs++ = MI_NOOP; cs 228 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c intel_ring_advance(rq, cs); cs 235 drivers/gpu/drm/i915/gt/intel_engine.h static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) cs 245 drivers/gpu/drm/i915/gt/intel_engine.h GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs); cs 389 drivers/gpu/drm/i915/gt/intel_engine.h gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags) cs 398 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = GFX_OP_PIPE_CONTROL(6); cs 399 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB; cs 400 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = gtt_offset; cs 401 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = 0; cs 402 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = value; cs 404 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = 0; cs 406 drivers/gpu/drm/i915/gt/intel_engine.h return cs; cs 410 drivers/gpu/drm/i915/gt/intel_engine.h gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) cs 417 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags; cs 418 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT; cs 419 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = 0; cs 420 drivers/gpu/drm/i915/gt/intel_engine.h *cs++ = value; cs 422 drivers/gpu/drm/i915/gt/intel_engine.h return cs; cs 664 drivers/gpu/drm/i915/gt/intel_engine_cs.c u32 cs[1024]; cs 683 drivers/gpu/drm/i915/gt/intel_engine_cs.c frame->ring.vaddr = frame->cs; cs 684 drivers/gpu/drm/i915/gt/intel_engine_cs.c frame->ring.size = sizeof(frame->cs); cs 697 drivers/gpu/drm/i915/gt/intel_engine_cs.c dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; cs 436 drivers/gpu/drm/i915/gt/intel_engine_types.h u32 *cs); cs 1872 drivers/gpu/drm/i915/gt/intel_lrc.c u32 *cs; cs 1876 drivers/gpu/drm/i915/gt/intel_lrc.c cs = intel_ring_begin(rq, 6); cs 1877 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(cs)) cs 1878 drivers/gpu/drm/i915/gt/intel_lrc.c return PTR_ERR(cs); cs 1886 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_ARB_CHECK; cs 1887 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_NOOP; cs 1889 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; cs 1890 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = rq->timeline->hwsp_offset; cs 1891 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = 0; cs 1892 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = rq->fence.seqno - 1; cs 1894 drivers/gpu/drm/i915/gt/intel_lrc.c intel_ring_advance(rq, cs); cs 1897 drivers/gpu/drm/i915/gt/intel_lrc.c rq->infix = intel_ring_offset(rq, cs); cs 1907 drivers/gpu/drm/i915/gt/intel_lrc.c u32 *cs; cs 1928 drivers/gpu/drm/i915/gt/intel_lrc.c cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); cs 1929 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(cs)) cs 1930 drivers/gpu/drm/i915/gt/intel_lrc.c return PTR_ERR(cs); cs 1933 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; cs 1938 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = 
i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); cs 1939 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = upper_32_bits(pd_daddr); cs 1940 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); cs 1941 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = lower_32_bits(pd_daddr); cs 1943 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_NOOP; cs 1945 drivers/gpu/drm/i915/gt/intel_lrc.c intel_ring_advance(rq, cs); cs 2669 drivers/gpu/drm/i915/gt/intel_lrc.c u32 *cs; cs 2671 drivers/gpu/drm/i915/gt/intel_lrc.c cs = intel_ring_begin(rq, 4); cs 2672 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(cs)) cs 2673 drivers/gpu/drm/i915/gt/intel_lrc.c return PTR_ERR(cs); cs 2688 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; cs 2691 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_BATCH_BUFFER_START_GEN8 | cs 2693 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = lower_32_bits(offset); cs 2694 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = upper_32_bits(offset); cs 2696 drivers/gpu/drm/i915/gt/intel_lrc.c intel_ring_advance(rq, cs); cs 2705 drivers/gpu/drm/i915/gt/intel_lrc.c u32 *cs; cs 2707 drivers/gpu/drm/i915/gt/intel_lrc.c cs = intel_ring_begin(rq, 6); cs 2708 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(cs)) cs 2709 drivers/gpu/drm/i915/gt/intel_lrc.c return PTR_ERR(cs); cs 2711 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; cs 2713 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_BATCH_BUFFER_START_GEN8 | cs 2715 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = lower_32_bits(offset); cs 2716 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = upper_32_bits(offset); cs 2718 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; cs 2719 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_NOOP; cs 2721 drivers/gpu/drm/i915/gt/intel_lrc.c intel_ring_advance(rq, cs); cs 2740 drivers/gpu/drm/i915/gt/intel_lrc.c u32 cmd, *cs; cs 2742 drivers/gpu/drm/i915/gt/intel_lrc.c cs = intel_ring_begin(request, 4); cs 2743 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(cs)) cs 2744 drivers/gpu/drm/i915/gt/intel_lrc.c return PTR_ERR(cs); cs 2761 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = cmd; cs 2762 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; cs 2763 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = 0; /* upper addr */ cs 2764 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = 0; /* value */ cs 2765 drivers/gpu/drm/i915/gt/intel_lrc.c intel_ring_advance(request, cs); cs 2778 drivers/gpu/drm/i915/gt/intel_lrc.c u32 *cs, flags = 0; cs 2820 drivers/gpu/drm/i915/gt/intel_lrc.c cs = intel_ring_begin(request, len); cs 2821 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(cs)) cs 2822 drivers/gpu/drm/i915/gt/intel_lrc.c return PTR_ERR(cs); cs 2825 drivers/gpu/drm/i915/gt/intel_lrc.c cs = gen8_emit_pipe_control(cs, 0, 0); cs 2828 drivers/gpu/drm/i915/gt/intel_lrc.c cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, cs 2831 drivers/gpu/drm/i915/gt/intel_lrc.c cs = gen8_emit_pipe_control(cs, flags, scratch_addr); cs 2834 drivers/gpu/drm/i915/gt/intel_lrc.c cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); cs 2836 drivers/gpu/drm/i915/gt/intel_lrc.c intel_ring_advance(request, cs); cs 2850 drivers/gpu/drm/i915/gt/intel_lrc.c u32 *cs; cs 2863 drivers/gpu/drm/i915/gt/intel_lrc.c cs = intel_ring_begin(request, 6); cs 2864 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(cs)) cs 2865 drivers/gpu/drm/i915/gt/intel_lrc.c return PTR_ERR(cs); cs 2867 drivers/gpu/drm/i915/gt/intel_lrc.c cs = 
gen8_emit_pipe_control(cs, flags, scratch_addr); cs 2868 drivers/gpu/drm/i915/gt/intel_lrc.c intel_ring_advance(request, cs); cs 2872 drivers/gpu/drm/i915/gt/intel_lrc.c u32 *cs; cs 2887 drivers/gpu/drm/i915/gt/intel_lrc.c cs = intel_ring_begin(request, 6); cs 2888 drivers/gpu/drm/i915/gt/intel_lrc.c if (IS_ERR(cs)) cs 2889 drivers/gpu/drm/i915/gt/intel_lrc.c return PTR_ERR(cs); cs 2891 drivers/gpu/drm/i915/gt/intel_lrc.c cs = gen8_emit_pipe_control(cs, flags, scratch_addr); cs 2892 drivers/gpu/drm/i915/gt/intel_lrc.c intel_ring_advance(request, cs); cs 2903 drivers/gpu/drm/i915/gt/intel_lrc.c static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) cs 2906 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_ARB_CHECK; cs 2907 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_NOOP; cs 2908 drivers/gpu/drm/i915/gt/intel_lrc.c request->wa_tail = intel_ring_offset(request, cs); cs 2910 drivers/gpu/drm/i915/gt/intel_lrc.c return cs; cs 2913 drivers/gpu/drm/i915/gt/intel_lrc.c static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs) cs 2915 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_SEMAPHORE_WAIT | cs 2919 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = 0; cs 2920 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = intel_hws_preempt_address(request->engine); cs 2921 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = 0; cs 2923 drivers/gpu/drm/i915/gt/intel_lrc.c return cs; cs 2928 drivers/gpu/drm/i915/gt/intel_lrc.c u32 *cs) cs 2930 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_USER_INTERRUPT; cs 2932 drivers/gpu/drm/i915/gt/intel_lrc.c *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; cs 2934 drivers/gpu/drm/i915/gt/intel_lrc.c cs = emit_preempt_busywait(request, cs); cs 2936 drivers/gpu/drm/i915/gt/intel_lrc.c request->tail = intel_ring_offset(request, cs); cs 2939 drivers/gpu/drm/i915/gt/intel_lrc.c return gen8_emit_wa_tail(request, cs); cs 2942 drivers/gpu/drm/i915/gt/intel_lrc.c static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) cs 2944 drivers/gpu/drm/i915/gt/intel_lrc.c cs = gen8_emit_ggtt_write(cs, cs 2949 drivers/gpu/drm/i915/gt/intel_lrc.c return gen8_emit_fini_breadcrumb_footer(request, cs); cs 2952 drivers/gpu/drm/i915/gt/intel_lrc.c static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) cs 2954 drivers/gpu/drm/i915/gt/intel_lrc.c cs = gen8_emit_ggtt_write_rcs(cs, cs 2962 drivers/gpu/drm/i915/gt/intel_lrc.c cs = gen8_emit_pipe_control(cs, cs 2967 drivers/gpu/drm/i915/gt/intel_lrc.c return gen8_emit_fini_breadcrumb_footer(request, cs); cs 2971 drivers/gpu/drm/i915/gt/intel_lrc.c u32 *cs) cs 2973 drivers/gpu/drm/i915/gt/intel_lrc.c cs = gen8_emit_ggtt_write_rcs(cs, cs 2983 drivers/gpu/drm/i915/gt/intel_lrc.c return gen8_emit_fini_breadcrumb_footer(request, cs); cs 439 drivers/gpu/drm/i915/gt/intel_mocs.c u32 *cs; cs 447 drivers/gpu/drm/i915/gt/intel_mocs.c cs = intel_ring_begin(rq, 2 + 2 * table->n_entries); cs 448 drivers/gpu/drm/i915/gt/intel_mocs.c if (IS_ERR(cs)) cs 449 drivers/gpu/drm/i915/gt/intel_mocs.c return PTR_ERR(cs); cs 451 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries); cs 456 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = i915_mmio_reg_offset(mocs_register(engine, index)); cs 457 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = value; cs 462 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = i915_mmio_reg_offset(mocs_register(engine, index)); cs 463 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = unused_value; cs 466 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = MI_NOOP; cs 467 drivers/gpu/drm/i915/gt/intel_mocs.c intel_ring_advance(rq, cs);
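
Nearly every i915 entry above is an instance of one emission protocol: intel_ring_begin() reserves a dword count on the request's ring and returns a cursor (or an ERR_PTR on failure), the caller writes exactly that many dwords through *cs++, and intel_ring_advance() closes the packet. A sketch of the protocol, assuming i915's internal headers; the 4-dword MI_STORE_DWORD_IMM sequence mirrors the selftest entries below rather than any specific function listed:

static int emit_store_dword(struct i915_request *rq, u32 gtt_addr, u32 value)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);	/* reserve exactly 4 dwords */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = gtt_addr;		/* lower 32 bits of the GGTT address */
	*cs++ = 0;			/* upper 32 bits */
	*cs++ = value;

	intel_ring_advance(rq, cs);	/* cursor must match the reservation */
	return 0;
}
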
cs 497 drivers/gpu/drm/i915/gt/intel_mocs.c u32 *cs; cs 505 drivers/gpu/drm/i915/gt/intel_mocs.c cs = intel_ring_begin(rq, 2 + table->n_entries); cs 506 drivers/gpu/drm/i915/gt/intel_mocs.c if (IS_ERR(cs)) cs 507 drivers/gpu/drm/i915/gt/intel_mocs.c return PTR_ERR(cs); cs 509 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2); cs 515 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i)); cs 516 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = l3cc_combine(table, low, high); cs 523 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i)); cs 524 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = l3cc_combine(table, low, unused_value); cs 530 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i)); cs 531 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = l3cc_combine(table, unused_value, unused_value); cs 534 drivers/gpu/drm/i915/gt/intel_mocs.c *cs++ = MI_NOOP; cs 535 drivers/gpu/drm/i915/gt/intel_mocs.c intel_ring_advance(rq, cs); cs 64 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 cmd, *cs; cs 73 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 2 + 3 * num_store_dw); cs 74 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 75 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 77 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = cmd; cs 79 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; cs 80 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = intel_gt_scratch_offset(rq->engine->gt, cs 82 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 84 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH; cs 86 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 94 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 cmd, *cs; cs 136 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, i); cs 137 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 138 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 140 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = cmd; cs 153 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; cs 154 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = intel_gt_scratch_offset(rq->engine->gt, cs 157 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 158 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 161 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_FLUSH; cs 163 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; cs 164 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = intel_gt_scratch_offset(rq->engine->gt, cs 167 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 168 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 171 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = cmd; cs 173 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 221 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs; cs 223 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 6); cs 224 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 225 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 227 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(5); cs 228 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = PIPE_CONTROL_CS_STALL | 
PIPE_CONTROL_STALL_AT_SCOREBOARD; cs 229 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; cs 230 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; /* low dword */ cs 231 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; /* high dword */ cs 232 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 233 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 235 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 6); cs 236 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 237 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 239 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(5); cs 240 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = PIPE_CONTROL_QW_WRITE; cs 241 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; cs 242 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 243 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 244 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 245 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 256 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs, flags = 0; cs 290 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 4); cs 291 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 292 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 294 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(4); cs 295 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = flags; cs 296 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; cs 297 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 298 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 303 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) cs 306 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(4); cs 307 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; cs 308 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 309 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 311 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(4); cs 312 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = PIPE_CONTROL_QW_WRITE; cs 313 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = intel_gt_scratch_offset(rq->engine->gt, cs 316 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 319 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(4); cs 320 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | cs 325 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT; cs 326 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = rq->fence.seqno; cs 328 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_USER_INTERRUPT; cs 329 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 331 drivers/gpu/drm/i915/gt/intel_ringbuffer.c rq->tail = intel_ring_offset(rq, cs); cs 334 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return cs; cs 340 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs; cs 342 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 4); cs 343 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 344 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 346 
drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(4); cs 347 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; cs 348 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 349 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 350 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 361 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs, flags = 0; cs 405 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 4); cs 406 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 407 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 409 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(4); cs 410 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = flags; cs 411 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = scratch_addr; cs 412 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 413 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 418 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) cs 420 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = GFX_OP_PIPE_CONTROL(4); cs 421 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | cs 428 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = rq->timeline->hwsp_offset; cs 429 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = rq->fence.seqno; cs 431 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_USER_INTERRUPT; cs 432 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 434 drivers/gpu/drm/i915/gt/intel_ringbuffer.c rq->tail = intel_ring_offset(rq, cs); cs 437 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return cs; cs 440 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) cs 445 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; cs 446 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; cs 447 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = rq->fence.seqno; cs 449 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_USER_INTERRUPT; cs 451 drivers/gpu/drm/i915/gt/intel_ringbuffer.c rq->tail = intel_ring_offset(rq, cs); cs 454 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return cs; cs 458 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) cs 465 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; cs 466 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; cs 467 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = rq->fence.seqno; cs 470 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_STORE_DWORD_INDEX; cs 471 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = I915_GEM_HWS_SEQNO_ADDR; cs 472 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = rq->fence.seqno; cs 475 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_FLUSH_DW; cs 476 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 477 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0; cs 479 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_USER_INTERRUPT; cs 480 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 482 drivers/gpu/drm/i915/gt/intel_ringbuffer.c rq->tail = intel_ring_offset(rq, cs); cs 485 
drivers/gpu/drm/i915/gt/intel_ringbuffer.c return cs; cs 938 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) cs 943 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_FLUSH; cs 945 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_STORE_DWORD_INDEX; cs 946 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = I915_GEM_HWS_SEQNO_ADDR; cs 947 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = rq->fence.seqno; cs 949 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_USER_INTERRUPT; cs 950 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 952 drivers/gpu/drm/i915/gt/intel_ringbuffer.c rq->tail = intel_ring_offset(rq, cs); cs 955 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return cs; cs 959 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) cs 966 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_FLUSH; cs 970 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_STORE_DWORD_INDEX; cs 971 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = I915_GEM_HWS_SEQNO_ADDR; cs 972 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = rq->fence.seqno; cs 975 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_USER_INTERRUPT; cs 977 drivers/gpu/drm/i915/gt/intel_ringbuffer.c rq->tail = intel_ring_offset(rq, cs); cs 980 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return cs; cs 1033 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs; cs 1035 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 2); cs 1036 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1037 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 1039 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_FLUSH; cs 1040 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 1041 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1087 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs; cs 1089 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 2); cs 1090 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1091 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 1093 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags & cs 1095 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = offset; cs 1096 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1110 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs, cs_offset = cs 1116 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 6); cs 1117 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1118 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 1121 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA; cs 1122 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096; cs 1123 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */ cs 1124 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = cs_offset; cs 1125 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0xdeadbeef; cs 1126 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 1127 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1133 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 6 + 2); cs 1134 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1135 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return 
PTR_ERR(cs); cs 1141 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2); cs 1142 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096; cs 1143 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096; cs 1144 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = cs_offset; cs 1145 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 4096; cs 1146 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = offset; cs 1148 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_FLUSH; cs 1149 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 1150 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1156 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 2); cs 1157 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1158 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 1160 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; cs 1161 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : cs 1163 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1173 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs; cs 1175 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 2); cs 1176 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1177 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 1179 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; cs 1180 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : cs 1182 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1533 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs; cs 1535 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 6); cs 1536 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1537 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 1539 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_LOAD_REGISTER_IMM(1); cs 1540 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base)); cs 1541 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = PP_DIR_DCLV_2G; cs 1543 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_LOAD_REGISTER_IMM(1); cs 1544 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); cs 1545 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10; cs 1547 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1555 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs; cs 1557 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 4); cs 1558 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1559 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 1562 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; cs 1563 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); cs 1564 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = intel_gt_scratch_offset(rq->engine->gt, cs 1566 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 1568 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1581 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs; cs 
1603 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, len); cs 1604 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1605 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 1609 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; cs 1613 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_LOAD_REGISTER_IMM(num_engines); cs 1618 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = i915_mmio_reg_offset( cs 1620 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = _MASKED_BIT_ENABLE( cs 1631 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN; cs 1647 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_SET_CONTEXT; cs 1648 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = i915_ggtt_offset(engine->kernel_context->state) | cs 1653 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 1654 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_SET_CONTEXT; cs 1655 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags; cs 1660 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 1667 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_LOAD_REGISTER_IMM(num_engines); cs 1673 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = i915_mmio_reg_offset(last_reg); cs 1674 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = _MASKED_BIT_DISABLE( cs 1679 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; cs 1680 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = i915_mmio_reg_offset(last_reg); cs 1681 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = intel_gt_scratch_offset(rq->engine->gt, cs 1683 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 1685 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; cs 1687 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_SUSPEND_FLUSH; cs 1690 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1697 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice]; cs 1703 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2); cs 1704 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1705 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 1712 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4); cs 1714 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i)); cs 1715 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = remap_info[i]; cs 1717 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP; cs 1718 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs); cs 1913 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs; cs 1976 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = ring->vaddr + ring->emit; cs 1977 drivers/gpu/drm/i915/gt/intel_ringbuffer.c GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs))); cs 1981 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return cs; cs 1988 drivers/gpu/drm/i915/gt/intel_ringbuffer.c void *cs; cs 1997 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, num_dwords); cs 1998 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs)) cs 1999 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs); cs 2001 drivers/gpu/drm/i915/gt/intel_ringbuffer.c memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2); cs 2002 
drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs);
cs 2047 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 cmd, *cs;
cs 2049 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 4);
cs 2050 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs))
cs 2051 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs);
cs 2071 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = cmd;
cs 2072 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
cs 2073 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = 0;
cs 2074 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_NOOP;
cs 2076 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs);
cs 2096 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs;
cs 2098 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 2);
cs 2099 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs))
cs 2100 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs);
cs 2102 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
cs 2105 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = offset;
cs 2106 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs);
cs 2116 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *cs;
cs 2118 drivers/gpu/drm/i915/gt/intel_ringbuffer.c cs = intel_ring_begin(rq, 2);
cs 2119 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (IS_ERR(cs))
cs 2120 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return PTR_ERR(cs);
cs 2122 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
cs 2125 drivers/gpu/drm/i915/gt/intel_ringbuffer.c *cs++ = offset;
cs 2126 drivers/gpu/drm/i915/gt/intel_ringbuffer.c intel_ring_advance(rq, cs);
cs 622 drivers/gpu/drm/i915/gt/intel_workarounds.c u32 *cs;
cs 632 drivers/gpu/drm/i915/gt/intel_workarounds.c cs = intel_ring_begin(rq, (wal->count * 2 + 2));
cs 633 drivers/gpu/drm/i915/gt/intel_workarounds.c if (IS_ERR(cs))
cs 634 drivers/gpu/drm/i915/gt/intel_workarounds.c return PTR_ERR(cs);
cs 636 drivers/gpu/drm/i915/gt/intel_workarounds.c *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
cs 638 drivers/gpu/drm/i915/gt/intel_workarounds.c *cs++ = i915_mmio_reg_offset(wa->reg);
cs 639 drivers/gpu/drm/i915/gt/intel_workarounds.c *cs++ = wa->val;
cs 641 drivers/gpu/drm/i915/gt/intel_workarounds.c *cs++ = MI_NOOP;
cs 643 drivers/gpu/drm/i915/gt/intel_workarounds.c intel_ring_advance(rq, cs);
cs 1469 drivers/gpu/drm/i915/gt/intel_workarounds.c u32 srm, *cs;
cs 1480 drivers/gpu/drm/i915/gt/intel_workarounds.c cs = intel_ring_begin(rq, 4 * count);
cs 1481 drivers/gpu/drm/i915/gt/intel_workarounds.c if (IS_ERR(cs))
cs 1482 drivers/gpu/drm/i915/gt/intel_workarounds.c return PTR_ERR(cs);
cs 1490 drivers/gpu/drm/i915/gt/intel_workarounds.c *cs++ = srm;
cs 1491 drivers/gpu/drm/i915/gt/intel_workarounds.c *cs++ = offset;
cs 1492 drivers/gpu/drm/i915/gt/intel_workarounds.c *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
cs 1493 drivers/gpu/drm/i915/gt/intel_workarounds.c *cs++ = 0;
cs 1495 drivers/gpu/drm/i915/gt/intel_workarounds.c intel_ring_advance(rq, cs);
cs 180 drivers/gpu/drm/i915/gt/mock_engine.c static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
cs 182 drivers/gpu/drm/i915/gt/mock_engine.c return cs;
cs 85 drivers/gpu/drm/i915/gt/selftest_lrc.c u32 *cs;
cs 87 drivers/gpu/drm/i915/gt/selftest_lrc.c cs = intel_ring_begin(rq, 10);
cs 88 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(cs))
cs 89 drivers/gpu/drm/i915/gt/selftest_lrc.c return PTR_ERR(cs);
cs 91 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
cs 93 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_SEMAPHORE_WAIT |
cs 97 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 0;
cs 98 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = i915_ggtt_offset(vma) + 4 * idx;
cs 99 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 0;
cs 102 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
cs 103 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
cs 104 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 0;
cs 105 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 1;
cs 107 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_NOOP;
cs 108 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_NOOP;
cs 109 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_NOOP;
cs 110 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_NOOP;
cs 113 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
cs 115 drivers/gpu/drm/i915/gt/selftest_lrc.c intel_ring_advance(rq, cs);
cs 153 drivers/gpu/drm/i915/gt/selftest_lrc.c u32 *cs;
cs 159 drivers/gpu/drm/i915/gt/selftest_lrc.c cs = intel_ring_begin(rq, 4);
cs 160 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(cs)) {
cs 162 drivers/gpu/drm/i915/gt/selftest_lrc.c return PTR_ERR(cs);
cs 165 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
cs 166 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
cs 167 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 0;
cs 168 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 1;
cs 170 drivers/gpu/drm/i915/gt/selftest_lrc.c intel_ring_advance(rq, cs);
cs 359 drivers/gpu/drm/i915/gt/selftest_lrc.c u32 *cs;
cs 387 drivers/gpu/drm/i915/gt/selftest_lrc.c cs = intel_ring_begin(lo, 8);
cs 388 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(cs)) {
cs 389 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(cs);
cs 394 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
cs 395 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = i915_ggtt_offset(vma);
cs 396 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 0;
cs 397 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 1;
cs 401 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_SEMAPHORE_WAIT |
cs 405 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 0;
cs 406 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = i915_ggtt_offset(vma);
cs 407 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 0;
cs 409 drivers/gpu/drm/i915/gt/selftest_lrc.c intel_ring_advance(lo, cs);
cs 431 drivers/gpu/drm/i915/gt/selftest_lrc.c cs = intel_ring_begin(hi, 4);
cs 432 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(cs)) {
cs 433 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(cs);
cs 438 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
cs 439 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = i915_ggtt_offset(vma);
cs 440 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 0;
cs 441 drivers/gpu/drm/i915/gt/selftest_lrc.c *cs++ = 0;
cs 443 drivers/gpu/drm/i915/gt/selftest_lrc.c intel_ring_advance(hi, cs);
cs 1598 drivers/gpu/drm/i915/gt/selftest_lrc.c u32 *cs;
cs 1619 drivers/gpu/drm/i915/gt/selftest_lrc.c cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
cs 1620 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(cs)) {
cs 1621 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(cs);
cs 1624 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
cs 1625 drivers/gpu/drm/i915/gt/selftest_lrc.c cs[n] = MI_ARB_CHECK;
cs 1626 drivers/gpu/drm/i915/gt/selftest_lrc.c cs[n] = MI_BATCH_BUFFER_END;
cs 418 drivers/gpu/drm/i915/gt/selftest_timeline.c u32 *cs;
cs 420 drivers/gpu/drm/i915/gt/selftest_timeline.c cs = intel_ring_begin(rq, 4);
cs 421 drivers/gpu/drm/i915/gt/selftest_timeline.c if (IS_ERR(cs))
cs 422 drivers/gpu/drm/i915/gt/selftest_timeline.c return PTR_ERR(cs);
cs 425 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
cs 426 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = addr;
cs 427 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = 0;
cs 428 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = value;
cs 430 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
cs 431 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = 0;
cs 432 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = addr;
cs 433 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = value;
cs 435 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
cs 436 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = addr;
cs 437 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = value;
cs 438 drivers/gpu/drm/i915/gt/selftest_timeline.c *cs++ = MI_NOOP;
cs 441 drivers/gpu/drm/i915/gt/selftest_timeline.c intel_ring_advance(rq, cs);
cs 80 drivers/gpu/drm/i915/gt/selftest_workarounds.c u32 srm, *cs;
cs 90 drivers/gpu/drm/i915/gt/selftest_workarounds.c cs = i915_gem_object_pin_map(result, I915_MAP_WB);
cs 91 drivers/gpu/drm/i915/gt/selftest_workarounds.c if (IS_ERR(cs)) {
cs 92 drivers/gpu/drm/i915/gt/selftest_workarounds.c err = PTR_ERR(cs);
cs 95 drivers/gpu/drm/i915/gt/selftest_workarounds.c memset(cs, 0xc5, PAGE_SIZE);
cs 127 drivers/gpu/drm/i915/gt/selftest_workarounds.c cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
cs 128 drivers/gpu/drm/i915/gt/selftest_workarounds.c if (IS_ERR(cs)) {
cs 129 drivers/gpu/drm/i915/gt/selftest_workarounds.c err = PTR_ERR(cs);
cs 134 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = srm;
cs 135 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
cs 136 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
cs 137 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = 0;
cs 139 drivers/gpu/drm/i915/gt/selftest_workarounds.c intel_ring_advance(rq, cs);
cs 469 drivers/gpu/drm/i915/gt/selftest_workarounds.c u32 *cs, *results;
cs 503 drivers/gpu/drm/i915/gt/selftest_workarounds.c cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
cs 504 drivers/gpu/drm/i915/gt/selftest_workarounds.c if (IS_ERR(cs)) {
cs 505 drivers/gpu/drm/i915/gt/selftest_workarounds.c err = PTR_ERR(cs);
cs 510 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = srm;
cs 511 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = reg;
cs 512 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = lower_32_bits(addr);
cs 513 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = upper_32_bits(addr);
cs 518 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = MI_LOAD_REGISTER_IMM(1);
cs 519 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = reg;
cs 520 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = values[v];
cs 523 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = srm;
cs 524 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = reg;
cs 525 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
cs 526 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
cs 531 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = MI_LOAD_REGISTER_IMM(1);
cs 532 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = reg;
cs 533 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = ~values[v];
cs 536 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = srm;
cs 537 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = reg;
cs 538 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
cs 539 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
cs 545 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = lrm;
cs 546 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = reg;
cs 547 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = lower_32_bits(addr);
cs 548 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = upper_32_bits(addr);
cs 550 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = MI_BATCH_BUFFER_END;
cs 769 drivers/gpu/drm/i915/gt/selftest_workarounds.c u32 srm, *cs;
cs 779 drivers/gpu/drm/i915/gt/selftest_workarounds.c cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
cs 780 drivers/gpu/drm/i915/gt/selftest_workarounds.c if (IS_ERR(cs)) {
cs 781 drivers/gpu/drm/i915/gt/selftest_workarounds.c err = PTR_ERR(cs);
cs 792 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = srm;
cs 793 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = reg;
cs 794 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = lower_32_bits(offset);
cs 795 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = upper_32_bits(offset);
cs 797 drivers/gpu/drm/i915/gt/selftest_workarounds.c intel_ring_advance(rq, cs);
cs 814 drivers/gpu/drm/i915/gt/selftest_workarounds.c u32 *cs;
cs 820 drivers/gpu/drm/i915/gt/selftest_workarounds.c cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
cs 821 drivers/gpu/drm/i915/gt/selftest_workarounds.c if (IS_ERR(cs)) {
cs 822 drivers/gpu/drm/i915/gt/selftest_workarounds.c err = PTR_ERR(cs);
cs 826 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
cs 833 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = reg;
cs 834 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = 0xffffffff;
cs 836 drivers/gpu/drm/i915/gt/selftest_workarounds.c *cs++ = MI_BATCH_BUFFER_END;
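The i915 entries above all follow one command-stream emission pattern: reserve dwords, write them through the returned cursor, then commit. A minimal sketch of that pattern, assuming an already-constructed struct i915_request *rq (the store-dword payload mirrors the selftest_timeline.c entries):

        /*
         * Sketch only: intel_ring_begin() reserves space in the ring (or
         * returns an ERR_PTR), the caller emits commands through the
         * returned cursor, and intel_ring_advance() checks that exactly
         * the reserved number of dwords was written.
         */
        static int emit_store_dword(struct i915_request *rq, u32 addr, u32 value)
        {
                u32 *cs;

                cs = intel_ring_begin(rq, 4);   /* reserve 4 dwords */
                if (IS_ERR(cs))
                        return PTR_ERR(cs);

                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = addr;                   /* address, low 32 bits */
                *cs++ = 0;                      /* address, high 32 bits */
                *cs++ = value;                  /* payload */

                intel_ring_advance(rq, cs);     /* must match the reservation */
                return 0;
        }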
cs 195 drivers/gpu/drm/i915/gvt/mmio_context.c u32 *cs;
cs 209 drivers/gpu/drm/i915/gvt/mmio_context.c cs = intel_ring_begin(req, count * 2 + 2);
cs 210 drivers/gpu/drm/i915/gvt/mmio_context.c if (IS_ERR(cs))
cs 211 drivers/gpu/drm/i915/gvt/mmio_context.c return PTR_ERR(cs);
cs 213 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_LOAD_REGISTER_IMM(count);
cs 220 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = i915_mmio_reg_offset(mmio->reg);
cs 221 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
cs 224 drivers/gpu/drm/i915/gvt/mmio_context.c *(cs-2), *(cs-1), vgpu->id, ring_id);
cs 227 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_NOOP;
cs 228 drivers/gpu/drm/i915/gvt/mmio_context.c intel_ring_advance(req, cs);
cs 242 drivers/gpu/drm/i915/gvt/mmio_context.c u32 *cs;
cs 244 drivers/gpu/drm/i915/gvt/mmio_context.c cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
cs 245 drivers/gpu/drm/i915/gvt/mmio_context.c if (IS_ERR(cs))
cs 246 drivers/gpu/drm/i915/gvt/mmio_context.c return PTR_ERR(cs);
cs 248 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
cs 251 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
cs 252 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
cs 254 drivers/gpu/drm/i915/gvt/mmio_context.c *(cs-2), *(cs-1), vgpu->id, req->engine->id);
cs 258 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_NOOP;
cs 259 drivers/gpu/drm/i915/gvt/mmio_context.c intel_ring_advance(req, cs);
cs 269 drivers/gpu/drm/i915/gvt/mmio_context.c u32 *cs;
cs 271 drivers/gpu/drm/i915/gvt/mmio_context.c cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
cs 272 drivers/gpu/drm/i915/gvt/mmio_context.c if (IS_ERR(cs))
cs 273 drivers/gpu/drm/i915/gvt/mmio_context.c return PTR_ERR(cs);
cs 275 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
cs 278 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
cs 279 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
cs 281 drivers/gpu/drm/i915/gvt/mmio_context.c *(cs-2), *(cs-1), vgpu->id, req->engine->id);
cs 285 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_NOOP;
cs 286 drivers/gpu/drm/i915/gvt/mmio_context.c intel_ring_advance(req, cs);
cs 300 drivers/gpu/drm/i915/gvt/mmio_context.c u32 *cs;
cs 302 drivers/gpu/drm/i915/gvt/mmio_context.c cs = intel_ring_begin(req, 2);
cs 303 drivers/gpu/drm/i915/gvt/mmio_context.c if (IS_ERR(cs))
cs 304 drivers/gpu/drm/i915/gvt/mmio_context.c return PTR_ERR(cs);
cs 306 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
cs 307 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_NOOP;
cs 308 drivers/gpu/drm/i915/gvt/mmio_context.c intel_ring_advance(req, cs);
cs 327 drivers/gpu/drm/i915/gvt/mmio_context.c cs = intel_ring_begin(req, 2);
cs 328 drivers/gpu/drm/i915/gvt/mmio_context.c if (IS_ERR(cs))
cs 329 drivers/gpu/drm/i915/gvt/mmio_context.c return PTR_ERR(cs);
cs 331 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
cs 332 drivers/gpu/drm/i915/gvt/mmio_context.c *cs++ = MI_NOOP;
cs 333 drivers/gpu/drm/i915/gvt/mmio_context.c intel_ring_advance(req, cs);
cs 306 drivers/gpu/drm/i915/gvt/scheduler.c u32 *cs;
cs 331 drivers/gpu/drm/i915/gvt/scheduler.c cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
cs 332 drivers/gpu/drm/i915/gvt/scheduler.c if (IS_ERR(cs)) {
cs 335 drivers/gpu/drm/i915/gvt/scheduler.c return PTR_ERR(cs);
cs 341 drivers/gpu/drm/i915/gvt/scheduler.c workload->shadow_ring_buffer_va = cs;
cs 343 drivers/gpu/drm/i915/gvt/scheduler.c memcpy(cs, shadow_ring_buffer_va,
cs 346 drivers/gpu/drm/i915/gvt/scheduler.c cs += workload->rb_len / sizeof(u32);
cs 347 drivers/gpu/drm/i915/gvt/scheduler.c intel_ring_advance(workload->req, cs);
cs 1722 drivers/gpu/drm/i915/i915_perf.c u32 *cs;
cs 1724 drivers/gpu/drm/i915/i915_perf.c cs = intel_ring_begin(rq, 4 * count);
cs 1725 drivers/gpu/drm/i915/i915_perf.c if (IS_ERR(cs))
cs 1726 drivers/gpu/drm/i915/i915_perf.c return PTR_ERR(cs);
cs 1730 drivers/gpu/drm/i915/i915_perf.c *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
cs 1731 drivers/gpu/drm/i915/i915_perf.c *cs++ = offset + (flex->offset + 1) * sizeof(u32);
cs 1732 drivers/gpu/drm/i915/i915_perf.c *cs++ = 0;
cs 1733 drivers/gpu/drm/i915/i915_perf.c *cs++ = flex->value;
cs 1736 drivers/gpu/drm/i915/i915_perf.c intel_ring_advance(rq, cs);
cs 1746 drivers/gpu/drm/i915/i915_perf.c u32 *cs;
cs 1750 drivers/gpu/drm/i915/i915_perf.c cs = intel_ring_begin(rq, 2 * count + 2);
cs 1751 drivers/gpu/drm/i915/i915_perf.c if (IS_ERR(cs))
cs 1752 drivers/gpu/drm/i915/i915_perf.c return PTR_ERR(cs);
cs 1754 drivers/gpu/drm/i915/i915_perf.c *cs++ = MI_LOAD_REGISTER_IMM(count);
cs 1756 drivers/gpu/drm/i915/i915_perf.c *cs++ = i915_mmio_reg_offset(flex->reg);
cs 1757 drivers/gpu/drm/i915/i915_perf.c *cs++ = flex->value;
cs 1759 drivers/gpu/drm/i915/i915_perf.c *cs++ = MI_NOOP;
cs 1761 drivers/gpu/drm/i915/i915_perf.c intel_ring_advance(rq, cs);
cs 837 drivers/gpu/drm/i915/i915_request.c u32 *cs;
cs 863 drivers/gpu/drm/i915/i915_request.c cs = intel_ring_begin(to, 4);
cs 864 drivers/gpu/drm/i915/i915_request.c if (IS_ERR(cs))
cs 865 drivers/gpu/drm/i915/i915_request.c return PTR_ERR(cs);
cs 875 drivers/gpu/drm/i915/i915_request.c *cs++ = MI_SEMAPHORE_WAIT |
cs 879 drivers/gpu/drm/i915/i915_request.c *cs++ = from->fence.seqno;
cs 880 drivers/gpu/drm/i915/i915_request.c *cs++ = hwsp_offset;
cs 881 drivers/gpu/drm/i915/i915_request.c *cs++ = 0;
cs 883 drivers/gpu/drm/i915/i915_request.c intel_ring_advance(to, cs);
cs 1191 drivers/gpu/drm/i915/i915_request.c u32 *cs;
cs 1211 drivers/gpu/drm/i915/i915_request.c cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
cs 1212 drivers/gpu/drm/i915/i915_request.c GEM_BUG_ON(IS_ERR(cs));
cs 1213 drivers/gpu/drm/i915/i915_request.c rq->postfix = intel_ring_offset(rq, cs);
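The mmio_context.c, intel_workarounds.c, and i915_perf.c entries above all size their reservation as count * 2 + 2: one MI_LOAD_REGISTER_IMM header dword, count (offset, value) pairs, and one MI_NOOP of padding. A sketch of that batching pattern; struct reg_write is an illustrative stand-in, not a driver type:

        /* Hypothetical container for the register writes to batch. */
        struct reg_write {
                i915_reg_t reg;
                u32 val;
        };

        static int emit_reg_writes(struct i915_request *rq,
                                   const struct reg_write *w, unsigned int count)
        {
                u32 *cs;
                unsigned int i;

                /* header + count pairs + trailing MI_NOOP = count * 2 + 2 */
                cs = intel_ring_begin(rq, count * 2 + 2);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);

                *cs++ = MI_LOAD_REGISTER_IMM(count);
                for (i = 0; i < count; i++) {
                        *cs++ = i915_mmio_reg_offset(w[i].reg);
                        *cs++ = w[i].val;
                }
                *cs++ = MI_NOOP;        /* keep the emission an even length */

                intel_ring_advance(rq, cs);
                return 0;
        }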
cs 66 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c void *cs = state->client_state[cid];
cs 71 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c WARN_ON(bitmap_weight(cs, cnt) > 0);
cs 89 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c set_bit(blk, cs);
cs 205 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c void *cs = state->client_state[cid];
cs 208 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c bitmap_andnot(state->state, state->state, cs, cnt);
cs 211 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c bitmap_zero(cs, cnt);
cs 299 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c void *cs = state->client_state[cid];
cs 301 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c nblks += update_smp_state(smp, cid, cs);
cs 356 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c void *cs = state->client_state[cid];
cs 357 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c int inuse = bitmap_weight(cs, smp->blk_cnt);
cs 63 drivers/gpu/drm/msm/disp/mdp_format.c #define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
cs 75 drivers/gpu/drm/msm/disp/mdp_format.c .chroma_sample = cs, \
cs 272 drivers/gpu/drm/radeon/radeon_cs.c struct drm_radeon_cs *cs = data;
cs 280 drivers/gpu/drm/radeon/radeon_cs.c if (!cs->num_chunks) {
cs 292 drivers/gpu/drm/radeon/radeon_cs.c p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
cs 296 drivers/gpu/drm/radeon/radeon_cs.c chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
cs 298 drivers/gpu/drm/radeon/radeon_cs.c sizeof(uint64_t)*cs->num_chunks)) {
cs 302 drivers/gpu/drm/radeon/radeon_cs.c p->nchunks = cs->num_chunks;
cs 363 drivers/gpu/ipu-v3/ipu-ic-csc.c if (csc->in_cs.cs == csc->out_cs.cs) {
cs 364 drivers/gpu/ipu-v3/ipu-ic-csc.c csc->params = (csc->in_cs.cs == IPUV3_COLORSPACE_YUV) ?
cs 374 drivers/gpu/ipu-v3/ipu-ic-csc.c params_tbl = (csc->in_cs.cs == IPUV3_COLORSPACE_YUV) ?
cs 378 drivers/gpu/ipu-v3/ipu-ic-csc.c params_tbl = (csc->in_cs.cs == IPUV3_COLORSPACE_YUV) ?
cs 283 drivers/gpu/ipu-v3/ipu-ic.c if (ic->in_cs.cs != ic->out_cs.cs)
cs 290 drivers/gpu/ipu-v3/ipu-ic.c if (ic->g_in_cs.cs != ic->out_cs.cs)
cs 2912 drivers/infiniband/hw/qib/qib_iba6120.c struct qib_chip_specific *cs = from_timer(cs, t, pma_timer);
cs 2913 drivers/infiniband/hw/qib/qib_iba6120.c struct qib_pportdata *ppd = cs->ppd;
cs 2918 drivers/infiniband/hw/qib/qib_iba6120.c if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
cs 2919 drivers/infiniband/hw/qib/qib_iba6120.c cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
cs 2920 drivers/infiniband/hw/qib/qib_iba6120.c qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
cs 2921 drivers/infiniband/hw/qib/qib_iba6120.c &cs->spkts, &cs->rpkts, &cs->xmit_wait);
cs 2922 drivers/infiniband/hw/qib/qib_iba6120.c mod_timer(&cs->pma_timer,
cs 2924 drivers/infiniband/hw/qib/qib_iba6120.c } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
cs 2927 drivers/infiniband/hw/qib/qib_iba6120.c cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
cs 2930 drivers/infiniband/hw/qib/qib_iba6120.c cs->sword = ta - cs->sword;
cs 2931 drivers/infiniband/hw/qib/qib_iba6120.c cs->rword = tb - cs->rword;
cs 2932 drivers/infiniband/hw/qib/qib_iba6120.c cs->spkts = tc - cs->spkts;
cs 2933 drivers/infiniband/hw/qib/qib_iba6120.c cs->rpkts = td - cs->rpkts;
cs 2934 drivers/infiniband/hw/qib/qib_iba6120.c cs->xmit_wait = te - cs->xmit_wait;
cs 2945 drivers/infiniband/hw/qib/qib_iba6120.c struct qib_chip_specific *cs = ppd->dd->cspec;
cs 2948 drivers/infiniband/hw/qib/qib_iba6120.c cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
cs 2949 drivers/infiniband/hw/qib/qib_iba6120.c mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
cs 2951 drivers/infiniband/hw/qib/qib_iba6120.c cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
cs 2952 drivers/infiniband/hw/qib/qib_iba6120.c qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
cs 2953 drivers/infiniband/hw/qib/qib_iba6120.c &cs->spkts, &cs->rpkts, &cs->xmit_wait);
cs 2954 drivers/infiniband/hw/qib/qib_iba6120.c mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
cs 2956 drivers/infiniband/hw/qib/qib_iba6120.c cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
cs 2957 drivers/infiniband/hw/qib/qib_iba6120.c cs->sword = 0;
cs 2958 drivers/infiniband/hw/qib/qib_iba6120.c cs->rword = 0;
cs 2959 drivers/infiniband/hw/qib/qib_iba6120.c cs->spkts = 0;
cs 2960 drivers/infiniband/hw/qib/qib_iba6120.c cs->rpkts = 0;
cs 2961 drivers/infiniband/hw/qib/qib_iba6120.c cs->xmit_wait = 0;
cs 1389 drivers/infiniband/hw/qib/qib_sd7220.c struct qib_chip_specific *cs = from_timer(cs, t, relock_timer);
cs 1390 drivers/infiniband/hw/qib/qib_sd7220.c struct qib_devdata *dd = cs->dd;
cs 1408 drivers/infiniband/hw/qib/qib_sd7220.c timeoff = cs->relock_interval << 1;
cs 1411 drivers/infiniband/hw/qib/qib_sd7220.c cs->relock_interval = timeoff;
cs 1414 drivers/infiniband/hw/qib/qib_sd7220.c mod_timer(&cs->relock_timer, jiffies + timeoff);
cs 1419 drivers/infiniband/hw/qib/qib_sd7220.c struct qib_chip_specific *cs = dd->cspec;
cs 1423 drivers/infiniband/hw/qib/qib_sd7220.c if (cs->relock_timer_active) {
cs 1424 drivers/infiniband/hw/qib/qib_sd7220.c cs->relock_interval = HZ;
cs 1425 drivers/infiniband/hw/qib/qib_sd7220.c mod_timer(&cs->relock_timer, jiffies + HZ);
cs 1435 drivers/infiniband/hw/qib/qib_sd7220.c if (!cs->relock_timer_active) {
cs 1436 drivers/infiniband/hw/qib/qib_sd7220.c cs->relock_timer_active = 1;
cs 1437 drivers/infiniband/hw/qib/qib_sd7220.c timer_setup(&cs->relock_timer, qib_run_relock, 0);
cs 1438 drivers/infiniband/hw/qib/qib_sd7220.c cs->relock_interval = timeout;
cs 1439 drivers/infiniband/hw/qib/qib_sd7220.c cs->relock_timer.expires = jiffies + timeout;
cs 1440 drivers/infiniband/hw/qib/qib_sd7220.c add_timer(&cs->relock_timer);
cs 1442 drivers/infiniband/hw/qib/qib_sd7220.c cs->relock_interval = timeout;
cs 1443 drivers/infiniband/hw/qib/qib_sd7220.c mod_timer(&cs->relock_timer, jiffies + timeout);
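The qib entries above show the standard kernel timer idiom: the callback recovers its containing state with from_timer() (a type-safe container_of on the timer_list member) and re-arms itself with mod_timer(). A minimal sketch; struct my_state and run_poll() are illustrative stand-ins for qib_chip_specific and its handlers:

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        struct my_state {
                struct timer_list poll_timer;
                unsigned long interval;         /* in jiffies */
        };

        static void run_poll(struct timer_list *t)
        {
                /* recover the container from the timer_list member */
                struct my_state *s = from_timer(s, t, poll_timer);

                /* ... periodic work ... */
                mod_timer(&s->poll_timer, jiffies + s->interval);
        }

        static void start_poll(struct my_state *s, unsigned long interval)
        {
                timer_setup(&s->poll_timer, run_poll, 0);
                s->interval = interval;
                mod_timer(&s->poll_timer, jiffies + interval);
        }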
cs 29 drivers/input/joystick/iforce/iforce-serio.c unsigned char cs;
cs 47 drivers/input/joystick/iforce/iforce-serio.c cs = 0x2b;
cs 52 drivers/input/joystick/iforce/iforce-serio.c cs ^= iforce->xmit.buf[iforce->xmit.tail];
cs 58 drivers/input/joystick/iforce/iforce-serio.c cs ^= iforce->xmit.buf[iforce->xmit.tail];
cs 62 drivers/input/joystick/iforce/iforce-serio.c serio_write(iforce_serio->serio, cs);
cs 59 drivers/iommu/arm-smmu-impl.c struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
cs 65 drivers/iommu/arm-smmu-impl.c cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
cs 73 drivers/iommu/arm-smmu-impl.c struct cavium_smmu *cs = container_of(smmu_domain->smmu,
cs 77 drivers/iommu/arm-smmu-impl.c smmu_domain->cfg.vmid += cs->id_base;
cs 79 drivers/iommu/arm-smmu-impl.c smmu_domain->cfg.asid += cs->id_base;
cs 91 drivers/iommu/arm-smmu-impl.c struct cavium_smmu *cs;
cs 93 drivers/iommu/arm-smmu-impl.c cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
cs 94 drivers/iommu/arm-smmu-impl.c if (!cs)
cs 97 drivers/iommu/arm-smmu-impl.c cs->smmu = *smmu;
cs 98 drivers/iommu/arm-smmu-impl.c cs->smmu.impl = &cavium_impl;
cs 102 drivers/iommu/arm-smmu-impl.c return &cs->smmu;
cs 485 drivers/isdn/hardware/mISDN/hfcpci.c cs->err_rx++;
cs 692 drivers/isdn/hardware/mISDN/hfcpci.c cs->err_tx++;
cs 377 drivers/isdn/mISDN/dsp_blowfish.c u32 cs;
cs 426 drivers/isdn/mISDN/dsp_blowfish.c cs = yl ^ (yl >> 3) ^ (yl >> 6) ^ (yl >> 9) ^ (yl >> 12) ^ (yl >> 15)
cs 441 drivers/isdn/mISDN/dsp_blowfish.c bf_crypt_out[5] = ((yr >> 22) & 0x7f) | ((cs << 5) & 0x80);
cs 442 drivers/isdn/mISDN/dsp_blowfish.c bf_crypt_out[6] = ((yr >> 15) & 0x7f) | ((cs << 6) & 0x80);
cs 443 drivers/isdn/mISDN/dsp_blowfish.c bf_crypt_out[7] = ((yr >> 8) & 0x7f) | (cs << 7);
cs 470 drivers/isdn/mISDN/dsp_blowfish.c u8 cs, cs0, cs1, cs2;
cs 504 drivers/isdn/mISDN/dsp_blowfish.c cs = yl ^ (yl >> 3) ^ (yl >> 6) ^ (yl >> 9) ^ (yl >> 12) ^ (yl >> 15)
cs 511 drivers/isdn/mISDN/dsp_blowfish.c if ((cs & 0x7) != (((cs2 >> 5) & 4) | ((cs1 >> 6) & 2) | (cs0 >> 7))) {
cs 867 drivers/md/bcache/bcache.h #define for_each_cache(ca, cs, iter) \
cs 868 drivers/md/bcache/bcache.h for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
cs 950 drivers/media/dvb-frontends/mxl5xx.c u8 *fw, cs = 0;
cs 959 drivers/media/dvb-frontends/mxl5xx.c cs += fw[i];
cs 960 drivers/media/dvb-frontends/mxl5xx.c if (cs != fh->image_checksum) {
cs 273 drivers/media/platform/omap3isp/isppreview.c const struct omap3isp_prev_csup *cs = &params->csup;
cs 276 drivers/media/platform/omap3isp/isppreview.c cs->gain | (cs->thres << ISPPRV_CSUP_THRES_SHIFT) |
cs 277 drivers/media/platform/omap3isp/isppreview.c (cs->hypf_en << ISPPRV_CSUP_HPYF_SHIFT),
cs 740 drivers/media/usb/pvrusb2/pvrusb2-hdw.c struct v4l2_ext_controls cs;
cs 742 drivers/media/usb/pvrusb2/pvrusb2-hdw.c memset(&cs,0,sizeof(cs));
cs 744 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cs.controls = &c1;
cs 745 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cs.count = 1;
cs 747 drivers/media/usb/pvrusb2/pvrusb2-hdw.c ret = cx2341x_ext_ctrls(&cptr->hdw->enc_ctl_state, 0, &cs,
cs 758 drivers/media/usb/pvrusb2/pvrusb2-hdw.c struct v4l2_ext_controls cs;
cs 760 drivers/media/usb/pvrusb2/pvrusb2-hdw.c memset(&cs,0,sizeof(cs));
cs 762 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cs.controls = &c1;
cs 763 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cs.count = 1;
cs 767 drivers/media/usb/pvrusb2/pvrusb2-hdw.c hdw->state_encoder_run, &cs,
cs 776 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 0, &cs,
cs 3059 drivers/media/usb/pvrusb2/pvrusb2-hdw.c struct v4l2_ext_controls cs;
cs 3061 drivers/media/usb/pvrusb2/pvrusb2-hdw.c memset(&cs, 0, sizeof(cs));
cs 3063 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cs.controls = &c1;
cs 3064 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cs.count = 1;
cs 3067 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cx2341x_ext_ctrls(&hdw->enc_ctl_state, 0, &cs,
cs 3121 drivers/media/usb/pvrusb2/pvrusb2-hdw.c struct v4l2_ext_controls cs;
cs 3123 drivers/media/usb/pvrusb2/pvrusb2-hdw.c memset(&cs,0,sizeof(cs));
cs 3125 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cs.controls = &c1;
cs 3126 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cs.count = 1;
cs 3129 drivers/media/usb/pvrusb2/pvrusb2-hdw.c cx2341x_ext_ctrls(&hdw->enc_ctl_state, 0, &cs,VIDIOC_S_EXT_CTRLS);
cs 29 drivers/media/usb/uvc/uvc_video.c u8 intfnum, u8 cs, void *data, u16 size,
cs 39 drivers/media/usb/uvc/uvc_video.c return usb_control_msg(dev->udev, pipe, query, type, cs << 8,
cs 68 drivers/media/usb/uvc/uvc_video.c u8 intfnum, u8 cs, void *data, u16 size)
cs 74 drivers/media/usb/uvc/uvc_video.c ret = __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size,
cs 81 drivers/media/usb/uvc/uvc_video.c uvc_query_name(query), cs, unit, ret, size);
cs 801 drivers/media/usb/uvc/uvcvideo.h u8 intfnum, u8 cs, void *data, u16 size);
cs 3296 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_controls *cs,
cs 3305 drivers/media/v4l2-core/v4l2-ctrls.c for (i = 0, h = helpers; i < cs->count; i++, h++) {
cs 3306 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_control *c = &cs->controls[i];
cs 3311 drivers/media/v4l2-core/v4l2-ctrls.c cs->error_idx = i;
cs 3313 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->which &&
cs 3314 drivers/media/v4l2-core/v4l2-ctrls.c cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
cs 3315 drivers/media/v4l2-core/v4l2-ctrls.c cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
cs 3316 drivers/media/v4l2-core/v4l2-ctrls.c V4L2_CTRL_ID2WHICH(id) != cs->which) {
cs 3319 drivers/media/v4l2-core/v4l2-ctrls.c cs->which, id);
cs 3385 drivers/media/v4l2-core/v4l2-ctrls.c for (i = 0; i < cs->count; i++)
cs 3387 drivers/media/v4l2-core/v4l2-ctrls.c for (i = 0, h = helpers; i < cs->count; i++, h++) {
cs 3421 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_controls *cs,
cs 3430 drivers/media/v4l2-core/v4l2-ctrls.c def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
cs 3432 drivers/media/v4l2-core/v4l2-ctrls.c cs->error_idx = cs->count;
cs 3433 drivers/media/v4l2-core/v4l2-ctrls.c cs->which = V4L2_CTRL_ID2WHICH(cs->which);
cs 3438 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->count == 0)
cs 3439 drivers/media/v4l2-core/v4l2-ctrls.c return class_check(hdl, cs->which);
cs 3441 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->count > ARRAY_SIZE(helper)) {
cs 3442 drivers/media/v4l2-core/v4l2-ctrls.c helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
cs 3448 drivers/media/v4l2-core/v4l2-ctrls.c ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true);
cs 3449 drivers/media/v4l2-core/v4l2-ctrls.c cs->error_idx = cs->count;
cs 3451 drivers/media/v4l2-core/v4l2-ctrls.c for (i = 0; !ret && i < cs->count; i++)
cs 3455 drivers/media/v4l2-core/v4l2-ctrls.c for (i = 0; !ret && i < cs->count; i++) {
cs 3466 drivers/media/v4l2-core/v4l2-ctrls.c cs->error_idx = i;
cs 3487 drivers/media/v4l2-core/v4l2-ctrls.c ret = req_to_user(cs->controls + idx,
cs 3490 drivers/media/v4l2-core/v4l2-ctrls.c ret = ctrl_to_user(cs->controls + idx,
cs 3498 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->count > ARRAY_SIZE(helper))
cs 3542 drivers/media/v4l2-core/v4l2-ctrls.c struct media_device *mdev, struct v4l2_ext_controls *cs)
cs 3548 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
cs 3549 drivers/media/v4l2-core/v4l2-ctrls.c if (!mdev || cs->request_fd < 0)
cs 3552 drivers/media/v4l2-core/v4l2-ctrls.c req = media_request_get_by_fd(mdev, cs->request_fd);
cs 3578 drivers/media/v4l2-core/v4l2-ctrls.c ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev);
cs 3720 drivers/media/v4l2-core/v4l2-ctrls.c static int validate_ctrls(struct v4l2_ext_controls *cs,
cs 3728 drivers/media/v4l2-core/v4l2-ctrls.c cs->error_idx = cs->count;
cs 3729 drivers/media/v4l2-core/v4l2-ctrls.c for (i = 0; i < cs->count; i++) {
cs 3733 drivers/media/v4l2-core/v4l2-ctrls.c cs->error_idx = i;
cs 3760 drivers/media/v4l2-core/v4l2-ctrls.c p_new.p_s64 = &cs->controls[i].value64;
cs 3762 drivers/media/v4l2-core/v4l2-ctrls.c p_new.p_s32 = &cs->controls[i].value;
cs 3787 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_controls *cs,
cs 3795 drivers/media/v4l2-core/v4l2-ctrls.c cs->error_idx = cs->count;
cs 3798 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) {
cs 3804 drivers/media/v4l2-core/v4l2-ctrls.c cs->which = V4L2_CTRL_ID2WHICH(cs->which);
cs 3812 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->count == 0)
cs 3813 drivers/media/v4l2-core/v4l2-ctrls.c return class_check(hdl, cs->which);
cs 3815 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->count > ARRAY_SIZE(helper)) {
cs 3816 drivers/media/v4l2-core/v4l2-ctrls.c helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
cs 3821 drivers/media/v4l2-core/v4l2-ctrls.c ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false);
cs 3823 drivers/media/v4l2-core/v4l2-ctrls.c ret = validate_ctrls(cs, helpers, vdev, set);
cs 3825 drivers/media/v4l2-core/v4l2-ctrls.c cs->error_idx = cs->count;
cs 3826 drivers/media/v4l2-core/v4l2-ctrls.c for (i = 0; !ret && i < cs->count; i++) {
cs 3833 drivers/media/v4l2-core/v4l2-ctrls.c cs->error_idx = i;
cs 3858 drivers/media/v4l2-core/v4l2-ctrls.c new_auto_val = cs->controls[tmp_idx].value;
cs 3872 drivers/media/v4l2-core/v4l2-ctrls.c ret = user_to_new(cs->controls + idx, ctrl);
cs 3894 drivers/media/v4l2-core/v4l2-ctrls.c ret = new_to_user(cs->controls + idx,
cs 3902 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->count > ARRAY_SIZE(helper))
cs 3911 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_controls *cs, bool set)
cs 3917 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
cs 3924 drivers/media/v4l2-core/v4l2-ctrls.c if (cs->request_fd < 0) {
cs 3926 drivers/media/v4l2-core/v4l2-ctrls.c video_device_node_name(vdev), cs->request_fd);
cs 3930 drivers/media/v4l2-core/v4l2-ctrls.c req = media_request_get_by_fd(mdev, cs->request_fd);
cs 3933 drivers/media/v4l2-core/v4l2-ctrls.c video_device_node_name(vdev), cs->request_fd);
cs 3940 drivers/media/v4l2-core/v4l2-ctrls.c video_device_node_name(vdev), cs->request_fd);
cs 3950 drivers/media/v4l2-core/v4l2-ctrls.c cs->request_fd);
cs 3959 drivers/media/v4l2-core/v4l2-ctrls.c ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
cs 3977 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_controls *cs)
cs 3979 drivers/media/v4l2-core/v4l2-ctrls.c return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false);
cs 3987 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_controls *cs)
cs 3989 drivers/media/v4l2-core/v4l2-ctrls.c return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true);
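The v4l2-ctrls.c entries above repeat a small-array-with-heap-fallback idiom: a short on-stack helper array covers the common case, kvmalloc_array() is used only when cs->count exceeds it, and cs->error_idx records how far processing got. A hedged sketch of that idiom; struct helper and process_one() are illustrative stand-ins for the driver's internal types:

        /* Stand-ins for the per-control scratch state and per-control step. */
        struct helper { int scratch; };
        static int process_one(struct v4l2_ext_control *c, struct helper *h);

        static int process_ext_ctrls(struct v4l2_ext_controls *cs)
        {
                struct helper helper[4];        /* covers the common case */
                struct helper *helpers = helper;
                unsigned int i;
                int ret = 0;

                cs->error_idx = cs->count;      /* "no specific control failed" */
                if (cs->count > ARRAY_SIZE(helper)) {
                        helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
                                                 GFP_KERNEL);
                        if (!helpers)
                                return -ENOMEM;
                }

                for (i = 0; !ret && i < cs->count; i++) {
                        cs->error_idx = i;      /* report the failing control */
                        ret = process_one(&cs->controls[i], &helpers[i]);
                }

                if (cs->count > ARRAY_SIZE(helper))
                        kvfree(helpers);
                return ret;
        }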
cs 23 drivers/memory/atmel-ebi.c int cs;
cs 83 drivers/memory/atmel-ebi.c atmel_smc_cs_conf_get(ebid->ebi->smc.regmap, conf->cs,
cs 91 drivers/memory/atmel-ebi.c conf->cs, &conf->smcconf);
cs 286 drivers/memory/atmel-ebi.c atmel_smc_cs_conf_apply(ebid->ebi->smc.regmap, conf->cs,
cs 294 drivers/memory/atmel-ebi.c conf->cs, &conf->smcconf);
cs 307 drivers/memory/atmel-ebi.c u32 cs;
cs 313 drivers/memory/atmel-ebi.c &cs);
cs 317 drivers/memory/atmel-ebi.c if (cs >= AT91_MATRIX_EBI_NUM_CS ||
cs 318 drivers/memory/atmel-ebi.c !(ebi->caps->available_cs & BIT(cs))) {
cs 323 drivers/memory/atmel-ebi.c if (!test_and_set_bit(cs, &cslines))
cs 347 drivers/memory/atmel-ebi.c for_each_set_bit(cs, &cslines, AT91_MATRIX_EBI_NUM_CS) {
cs 348 drivers/memory/atmel-ebi.c ebid->configs[i].cs = cs;
cs 351 drivers/memory/atmel-ebi.c conf.cs = cs;
cs 364 drivers/memory/atmel-ebi.c BIT(cs), 0);
cs 265 drivers/memory/omap-gpmc.c void gpmc_cs_write_reg(int cs, int idx, u32 val)
cs 269 drivers/memory/omap-gpmc.c reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
cs 273 drivers/memory/omap-gpmc.c static u32 gpmc_cs_read_reg(int cs, int idx)
cs 277 drivers/memory/omap-gpmc.c reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
cs 300 drivers/memory/omap-gpmc.c static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
cs 310 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
cs 325 drivers/memory/omap-gpmc.c static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs,
cs 331 drivers/memory/omap-gpmc.c tick_ps = gpmc_get_clk_period(cs, cd);
cs 351 drivers/memory/omap-gpmc.c static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
cs 354 drivers/memory/omap-gpmc.c return ticks * gpmc_get_clk_period(cs, cd) / 1000;
cs 374 drivers/memory/omap-gpmc.c static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
cs 378 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, reg);
cs 383 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, reg, l);
cs 386 drivers/memory/omap-gpmc.c static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
cs 388 drivers/memory/omap-gpmc.c gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
cs 391 drivers/memory/omap-gpmc.c gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
cs 393 drivers/memory/omap-gpmc.c gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
cs 395 drivers/memory/omap-gpmc.c gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
cs 397 drivers/memory/omap-gpmc.c gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
cs 399 drivers/memory/omap-gpmc.c gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
cs 402 drivers/memory/omap-gpmc.c gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
cs 430 drivers/memory/omap-gpmc.c int cs, int reg, int st_bit, int end_bit, int max,
cs 442 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, reg);
cs 459 drivers/memory/omap-gpmc.c time_ns_min = gpmc_clk_ticks_to_ns(l - 1, cs, cd) + 1;
cs 460 drivers/memory/omap-gpmc.c time_ns = gpmc_clk_ticks_to_ns(l, cs, cd);
cs 473 drivers/memory/omap-gpmc.c #define GPMC_PRINT_CONFIG(cs, config) \
cs 474 drivers/memory/omap-gpmc.c pr_info("cs%i %s: 0x%08x\n", cs, #config, \
cs 475 drivers/memory/omap-gpmc.c gpmc_cs_read_reg(cs, config))
cs 477 drivers/memory/omap-gpmc.c get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 0)
cs 479 drivers/memory/omap-gpmc.c get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, 0, 1, 0)
cs 481 drivers/memory/omap-gpmc.c get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 1)
cs 483 drivers/memory/omap-gpmc.c get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, (shift), 1, 1)
cs 485 drivers/memory/omap-gpmc.c get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 0, 0)
cs 487 drivers/memory/omap-gpmc.c get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, (cd), 0, 0, 0)
cs 489 drivers/memory/omap-gpmc.c get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, (cd), 0, 0, 0)
cs 491 drivers/memory/omap-gpmc.c static void gpmc_show_regs(int cs, const char *desc)
cs 493 drivers/memory/omap-gpmc.c pr_info("gpmc cs%i %s:\n", cs, desc);
cs 494 drivers/memory/omap-gpmc.c GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG1);
cs 495 drivers/memory/omap-gpmc.c GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG2);
cs 496 drivers/memory/omap-gpmc.c GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG3);
cs 497 drivers/memory/omap-gpmc.c GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG4);
cs 498 drivers/memory/omap-gpmc.c GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG5);
cs 499 drivers/memory/omap-gpmc.c GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG6);
cs 506 drivers/memory/omap-gpmc.c static void gpmc_cs_show_timings(int cs, const char *desc)
cs 508 drivers/memory/omap-gpmc.c gpmc_show_regs(cs, desc);
cs 510 drivers/memory/omap-gpmc.c pr_info("gpmc cs%i access configuration:\n", cs);
cs 537 drivers/memory/omap-gpmc.c pr_info("gpmc cs%i timings configuration:\n", cs);
cs 582 drivers/memory/omap-gpmc.c static inline void gpmc_cs_show_timings(int cs, const char *desc)
cs 603 drivers/memory/omap-gpmc.c static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max,
cs 612 drivers/memory/omap-gpmc.c ticks = gpmc_ns_to_clk_ticks(time, cs, cd);
cs 621 drivers/memory/omap-gpmc.c __func__, cs, name, time, ticks, max);
cs 626 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, reg);
cs 630 drivers/memory/omap-gpmc.c cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
cs 635 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, reg, l);
cs 641 drivers/memory/omap-gpmc.c if (set_gpmc_timing_reg(cs, (reg), (st), (end), (max), \
cs 705 drivers/memory/omap-gpmc.c int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
cs 778 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
cs 781 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
cs 792 drivers/memory/omap-gpmc.c cs, (div * gpmc_get_fclk_period()) / 1000, div);
cs 795 drivers/memory/omap-gpmc.c gpmc_cs_bool_timings(cs, &t->bool_timings);
cs 796 drivers/memory/omap-gpmc.c gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings");
cs 801 drivers/memory/omap-gpmc.c static int gpmc_cs_set_memconf(int cs, u32 base, u32 size)
cs 818 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
cs 823 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
cs 828 drivers/memory/omap-gpmc.c static void gpmc_cs_enable_mem(int cs)
cs 832 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
cs 834 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
cs 837 drivers/memory/omap-gpmc.c static void gpmc_cs_disable_mem(int cs)
cs 841 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
cs 843 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
cs 846 drivers/memory/omap-gpmc.c static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
cs 851 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
cs 857 drivers/memory/omap-gpmc.c static int gpmc_cs_mem_enabled(int cs)
cs 861 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
cs 865 drivers/memory/omap-gpmc.c static void gpmc_cs_set_reserved(int cs, int reserved)
cs 867 drivers/memory/omap-gpmc.c struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
cs 872 drivers/memory/omap-gpmc.c static bool gpmc_cs_reserved(int cs)
cs 874 drivers/memory/omap-gpmc.c struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
cs 879 drivers/memory/omap-gpmc.c static void gpmc_cs_set_name(int cs, const char *name)
cs 881 drivers/memory/omap-gpmc.c struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
cs 886 drivers/memory/omap-gpmc.c static const char *gpmc_cs_get_name(int cs)
cs 888 drivers/memory/omap-gpmc.c struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
cs 907 drivers/memory/omap-gpmc.c static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
cs 909 drivers/memory/omap-gpmc.c struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
cs 923 drivers/memory/omap-gpmc.c static int gpmc_cs_delete_mem(int cs)
cs 925 drivers/memory/omap-gpmc.c struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
cs 947 drivers/memory/omap-gpmc.c static int gpmc_cs_remap(int cs, u32 base)
cs 952 drivers/memory/omap-gpmc.c if (cs > gpmc_cs_num) {
cs 964 drivers/memory/omap-gpmc.c gpmc_cs_get_memconf(cs, &old_base, &size);
cs 968 drivers/memory/omap-gpmc.c ret = gpmc_cs_delete_mem(cs);
cs 972 drivers/memory/omap-gpmc.c ret = gpmc_cs_insert_mem(cs, base, size);
cs 976 drivers/memory/omap-gpmc.c ret = gpmc_cs_set_memconf(cs, base, size);
cs 981 drivers/memory/omap-gpmc.c int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
cs 983 drivers/memory/omap-gpmc.c struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
cs 987 drivers/memory/omap-gpmc.c if (cs > gpmc_cs_num) {
cs 996 drivers/memory/omap-gpmc.c if (gpmc_cs_reserved(cs)) {
cs 1000 drivers/memory/omap-gpmc.c if (gpmc_cs_mem_enabled(cs))
cs 1009 drivers/memory/omap-gpmc.c gpmc_cs_disable_mem(cs);
cs 1011 drivers/memory/omap-gpmc.c r = gpmc_cs_set_memconf(cs, res->start, resource_size(res));
cs 1018 drivers/memory/omap-gpmc.c gpmc_cs_enable_mem(cs);
cs 1020 drivers/memory/omap-gpmc.c gpmc_cs_set_reserved(cs, 1);
cs 1027 drivers/memory/omap-gpmc.c void gpmc_cs_free(int cs)
cs 1029 drivers/memory/omap-gpmc.c struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
cs 1033 drivers/memory/omap-gpmc.c if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
cs 1034 drivers/memory/omap-gpmc.c printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
cs 1039 drivers/memory/omap-gpmc.c gpmc_cs_disable_mem(cs);
cs 1042 drivers/memory/omap-gpmc.c gpmc_cs_set_reserved(cs, 0);
cs 1096 drivers/memory/omap-gpmc.c struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
cs 1100 drivers/memory/omap-gpmc.c if (cs >= gpmc_cs_num)
cs 1104 drivers/memory/omap-gpmc.c GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
cs 1106 drivers/memory/omap-gpmc.c GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
cs 1108 drivers/memory/omap-gpmc.c GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
cs 1218 drivers/memory/omap-gpmc.c int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
cs 1237 drivers/memory/omap-gpmc.c ret = gpmc_cs_program_settings(cs, &gpmc_s);
cs 1241 drivers/memory/omap-gpmc.c return gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
cs 1461 drivers/memory/omap-gpmc.c int cs;
cs 1463 drivers/memory/omap-gpmc.c for (cs = 0; cs < gpmc_cs_num; cs++) {
cs 1464 drivers/memory/omap-gpmc.c if (!gpmc_cs_mem_enabled(cs))
cs 1466 drivers/memory/omap-gpmc.c gpmc_cs_delete_mem(cs);
cs 1473 drivers/memory/omap-gpmc.c int cs;
cs 1479 drivers/memory/omap-gpmc.c for (cs = 0; cs < gpmc_cs_num; cs++) {
cs 1482 drivers/memory/omap-gpmc.c if (!gpmc_cs_mem_enabled(cs))
cs 1484 drivers/memory/omap-gpmc.c gpmc_cs_get_memconf(cs, &base, &size);
cs 1485 drivers/memory/omap-gpmc.c if (gpmc_cs_insert_mem(cs, base, size)) {
cs 1487 drivers/memory/omap-gpmc.c __func__, cs, base, base + size);
cs 1488 drivers/memory/omap-gpmc.c gpmc_cs_disable_mem(cs);
cs 1827 drivers/memory/omap-gpmc.c int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
cs 1893 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1);
cs 2039 drivers/memory/omap-gpmc.c int ret, cs;
cs 2044 drivers/memory/omap-gpmc.c if (of_property_read_u32(child, "reg", &cs) < 0) {
cs 2061 drivers/memory/omap-gpmc.c name = gpmc_cs_get_name(cs);
cs 2065 drivers/memory/omap-gpmc.c ret = gpmc_cs_request(cs, resource_size(&res), &base);
cs 2067 drivers/memory/omap-gpmc.c dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
cs 2070 drivers/memory/omap-gpmc.c gpmc_cs_set_name(cs, child->full_name);
cs 2082 drivers/memory/omap-gpmc.c cs);
cs 2083 drivers/memory/omap-gpmc.c gpmc_cs_show_timings(cs,
cs 2089 drivers/memory/omap-gpmc.c gpmc_cs_disable_mem(cs);
cs 2099 drivers/memory/omap-gpmc.c ret = gpmc_cs_remap(cs, res.start);
cs 2102 drivers/memory/omap-gpmc.c cs, &res.start);
cs 2106 drivers/memory/omap-gpmc.c cs, GPMC_MEM_START);
cs 2110 drivers/memory/omap-gpmc.c cs, GPMC_MEM_END);
cs 2182 drivers/memory/omap-gpmc.c gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings");
cs 2184 drivers/memory/omap-gpmc.c ret = gpmc_cs_program_settings(cs, &gpmc_s);
cs 2188 drivers/memory/omap-gpmc.c ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
cs 2201 drivers/memory/omap-gpmc.c gpmc_cs_enable_mem(cs);
cs 2225 drivers/memory/omap-gpmc.c gpmc_cs_free(cs);
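The omap-gpmc.c entries above compute every per-chip-select register address the same way: each CS owns a fixed-size block of CONFIG1..CONFIG7 registers at a constant stride from CS0, so the address is base + CS0_OFFSET + cs * CS_SIZE + reg. A minimal sketch of that addressing; the 0x60/0x30 constants are illustrative assumptions, not quoted from the driver:

        #define MY_GPMC_CS0_OFFSET      0x60    /* assumed offset of the CS0 block */
        #define MY_GPMC_CS_SIZE         0x30    /* assumed size of each CS block */

        /* Address of register 'idx' within chip-select 'cs'. */
        static void __iomem *my_gpmc_cs_reg(void __iomem *base, int cs, int idx)
        {
                return base + MY_GPMC_CS0_OFFSET + cs * MY_GPMC_CS_SIZE + idx;
        }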
cs 86 drivers/memory/pl172.c struct device_node *np, u32 cs)
cs 101 drivers/memory/pl172.c dev_err(&adev->dev, "invalid memory width cs%u\n", cs);
cs 128 drivers/memory/pl172.c writel(cfg, pl172->base + MPMC_STATIC_CFG(cs));
cs 129 drivers/memory/pl172.c dev_dbg(&adev->dev, "mpmc static config cs%u: 0x%08x\n", cs, cfg);
cs 133 drivers/memory/pl172.c MPMC_STATIC_WAIT_WEN(cs),
cs 139 drivers/memory/pl172.c MPMC_STATIC_WAIT_OEN(cs),
cs 145 drivers/memory/pl172.c MPMC_STATIC_WAIT_RD(cs),
cs 151 drivers/memory/pl172.c MPMC_STATIC_WAIT_PAGE(cs),
cs 157 drivers/memory/pl172.c MPMC_STATIC_WAIT_WR(cs),
cs 163 drivers/memory/pl172.c MPMC_STATIC_WAIT_TURN(cs),
cs 170 drivers/memory/pl172.c dev_err(&adev->dev, "failed to configure cs%u\n", cs);
cs 177 drivers/memory/pl172.c u32 cs;
cs 179 drivers/memory/pl172.c if (!of_property_read_u32(np, "mpmc,cs", &cs)) {
cs 180 drivers/memory/pl172.c if (cs >= PL172_MAX_CS) {
cs 181 drivers/memory/pl172.c dev_err(&adev->dev, "cs%u invalid\n", cs);
cs 185 drivers/memory/pl172.c return pl172_setup_static(adev, np, cs);
cs 72 drivers/memory/samsung/exynos-srom.c u32 cs, bw;
cs 86 drivers/memory/samsung/exynos-srom.c cs = 1 << EXYNOS_SROM_BW__BYTEENABLE__SHIFT;
cs 88 drivers/memory/samsung/exynos-srom.c cs |= 1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT;
cs 91 drivers/memory/samsung/exynos-srom.c bw = (bw & ~(EXYNOS_SROM_BW__CS_MASK << bank)) | (cs << bank);
cs 98 drivers/memory/ti-aemif.c u8 cs;
cs 183 drivers/memory/ti-aemif.c offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
cs 238 drivers/memory/ti-aemif.c offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
cs 266 drivers/memory/ti-aemif.c u32 cs;
cs 269 drivers/memory/ti-aemif.c if (of_property_read_u32(np, "ti,cs-chipselect", &cs)) {
cs 274 drivers/memory/ti-aemif.c if (cs - aemif->cs_offset >= NUM_CS || cs < aemif->cs_offset) {
cs 275 drivers/memory/ti-aemif.c dev_dbg(&pdev->dev, "cs number is incorrect %d", cs);
cs 285 drivers/memory/ti-aemif.c data->cs = cs;
cs 386 drivers/memory/ti-aemif.c aemif->cs_data[i].cs = pdata->abus_data[i].cs;
cs 395 drivers/memory/ti-aemif.c aemif->cs_data[i].cs);
cs 245 drivers/mfd/atmel-smc.c void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs,
cs 248 drivers/mfd/atmel-smc.c regmap_write(regmap, ATMEL_SMC_SETUP(cs), conf->setup);
cs 249 drivers/mfd/atmel-smc.c regmap_write(regmap, ATMEL_SMC_PULSE(cs), conf->pulse);
cs 250 drivers/mfd/atmel-smc.c regmap_write(regmap, ATMEL_SMC_CYCLE(cs), conf->cycle);
cs 251 drivers/mfd/atmel-smc.c regmap_write(regmap, ATMEL_SMC_MODE(cs), conf->mode);
cs 267 drivers/mfd/atmel-smc.c int cs, const struct atmel_smc_cs_conf *conf)
cs 269 drivers/mfd/atmel-smc.c regmap_write(regmap, ATMEL_HSMC_SETUP(layout, cs), conf->setup);
cs 270 drivers/mfd/atmel-smc.c regmap_write(regmap, ATMEL_HSMC_PULSE(layout, cs), conf->pulse);
cs 271 drivers/mfd/atmel-smc.c regmap_write(regmap, ATMEL_HSMC_CYCLE(layout, cs), conf->cycle);
cs 272 drivers/mfd/atmel-smc.c regmap_write(regmap, ATMEL_HSMC_TIMINGS(layout, cs), conf->timings);
cs 273 drivers/mfd/atmel-smc.c regmap_write(regmap, ATMEL_HSMC_MODE(layout, cs), conf->mode);
cs 286 drivers/mfd/atmel-smc.c void atmel_smc_cs_conf_get(struct regmap *regmap, int cs,
cs 289 drivers/mfd/atmel-smc.c regmap_read(regmap, ATMEL_SMC_SETUP(cs), &conf->setup);
cs 290 drivers/mfd/atmel-smc.c regmap_read(regmap, ATMEL_SMC_PULSE(cs), &conf->pulse);
cs 291 drivers/mfd/atmel-smc.c regmap_read(regmap, ATMEL_SMC_CYCLE(cs), &conf->cycle);
cs 292 drivers/mfd/atmel-smc.c regmap_read(regmap, ATMEL_SMC_MODE(cs), &conf->mode);
cs 308 drivers/mfd/atmel-smc.c int cs, struct atmel_smc_cs_conf *conf)
cs 310 drivers/mfd/atmel-smc.c regmap_read(regmap, ATMEL_HSMC_SETUP(layout, cs), &conf->setup);
cs 311 drivers/mfd/atmel-smc.c regmap_read(regmap, ATMEL_HSMC_PULSE(layout, cs), &conf->pulse);
cs 312 drivers/mfd/atmel-smc.c regmap_read(regmap, ATMEL_HSMC_CYCLE(layout, cs), &conf->cycle);
cs 313 drivers/mfd/atmel-smc.c regmap_read(regmap, ATMEL_HSMC_TIMINGS(layout, cs), &conf->timings);
cs 314 drivers/mfd/atmel-smc.c regmap_read(regmap, ATMEL_HSMC_MODE(layout, cs), &conf->mode);
cs 43 drivers/misc/genwqe/card_sysfs.c const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" };
cs 45 drivers/misc/genwqe/card_sysfs.c return sprintf(buf, "%s\n", cs[cd->card_state]);
cs 53 drivers/misc/habanalabs/command_submission.c static void cs_get(struct hl_cs *cs)
cs 55 drivers/misc/habanalabs/command_submission.c kref_get(&cs->refcount);
cs 58 drivers/misc/habanalabs/command_submission.c static int cs_get_unless_zero(struct hl_cs *cs)
cs 60 drivers/misc/habanalabs/command_submission.c return kref_get_unless_zero(&cs->refcount);
cs 63 drivers/misc/habanalabs/command_submission.c static void cs_put(struct hl_cs *cs)
cs 65 drivers/misc/habanalabs/command_submission.c kref_put(&cs->refcount, cs_do_release);
cs 85 drivers/misc/habanalabs/command_submission.c parser.ctx_id = job->cs->ctx->asid;
cs 86 drivers/misc/habanalabs/command_submission.c parser.cs_sequence = job->cs->sequence;
cs 125 drivers/misc/habanalabs/command_submission.c struct hl_cs *cs = job->cs;
cs 147 drivers/misc/habanalabs/command_submission.c spin_lock(&cs->job_lock);
cs 149 drivers/misc/habanalabs/command_submission.c spin_unlock(&cs->job_lock);
cs 154 drivers/misc/habanalabs/command_submission.c cs_put(cs);
cs 161 drivers/misc/habanalabs/command_submission.c struct hl_cs *cs = container_of(ref, struct hl_cs,
cs 163 drivers/misc/habanalabs/command_submission.c struct hl_device *hdev = cs->ctx->hdev;
cs 166 drivers/misc/habanalabs/command_submission.c cs->completed = true;
cs 176 drivers/misc/habanalabs/command_submission.c list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
cs 180 drivers/misc/habanalabs/command_submission.c if (cs->submitted) {
cs 199 drivers/misc/habanalabs/command_submission.c hl_int_hw_queue_update_ci(cs);
cs 203 drivers/misc/habanalabs/command_submission.c list_del_init(&cs->mirror_node);
cs 210 drivers/misc/habanalabs/command_submission.c if ((!cs->timedout) &&
cs 214 drivers/misc/habanalabs/command_submission.c if (cs->tdr_active)
cs 215 drivers/misc/habanalabs/command_submission.c cancel_delayed_work_sync(&cs->work_tdr);
cs 238 drivers/misc/habanalabs/command_submission.c hl_debugfs_remove_cs(cs);
cs 240 drivers/misc/habanalabs/command_submission.c hl_ctx_put(cs->ctx);
cs 242 drivers/misc/habanalabs/command_submission.c if (cs->timedout)
cs 243 drivers/misc/habanalabs/command_submission.c dma_fence_set_error(cs->fence, -ETIMEDOUT);
cs 244 drivers/misc/habanalabs/command_submission.c else if (cs->aborted)
cs 245 drivers/misc/habanalabs/command_submission.c dma_fence_set_error(cs->fence, -EIO);
cs 247 drivers/misc/habanalabs/command_submission.c dma_fence_signal(cs->fence);
cs 248 drivers/misc/habanalabs/command_submission.c dma_fence_put(cs->fence);
cs 250 drivers/misc/habanalabs/command_submission.c kfree(cs);
cs 257 drivers/misc/habanalabs/command_submission.c struct hl_cs *cs = container_of(work, struct hl_cs,
cs 259 drivers/misc/habanalabs/command_submission.c rc = cs_get_unless_zero(cs);
cs 263 drivers/misc/habanalabs/command_submission.c if ((!cs->submitted) || (cs->completed)) {
cs 264 drivers/misc/habanalabs/command_submission.c cs_put(cs);
cs 269 drivers/misc/habanalabs/command_submission.c cs->timedout = true;
cs 271 drivers/misc/habanalabs/command_submission.c hdev = cs->ctx->hdev;
cs 272 drivers/misc/habanalabs/command_submission.c ctx_asid = cs->ctx->asid;
cs 276 drivers/misc/habanalabs/command_submission.c ctx_asid, cs->sequence);
cs 278 drivers/misc/habanalabs/command_submission.c cs_put(cs);
cs 289 drivers/misc/habanalabs/command_submission.c struct hl_cs *cs;
cs 292 drivers/misc/habanalabs/command_submission.c cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
cs 293 drivers/misc/habanalabs/command_submission.c if (!cs)
cs 296 drivers/misc/habanalabs/command_submission.c cs->ctx = ctx;
cs 297 drivers/misc/habanalabs/command_submission.c cs->submitted = false;
cs 298 drivers/misc/habanalabs/command_submission.c cs->completed = false;
cs 299 drivers/misc/habanalabs/command_submission.c INIT_LIST_HEAD(&cs->job_list);
cs 300 drivers/misc/habanalabs/command_submission.c INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
cs 301 drivers/misc/habanalabs/command_submission.c kref_init(&cs->refcount);
cs 302 drivers/misc/habanalabs/command_submission.c spin_lock_init(&cs->job_lock);
cs 312 drivers/misc/habanalabs/command_submission.c cs->fence = &fence->base_fence;
cs 329 drivers/misc/habanalabs/command_submission.c cs->sequence = fence->cs_seq;
cs 341 drivers/misc/habanalabs/command_submission.c *cs_new = cs;
cs 348 drivers/misc/habanalabs/command_submission.c kfree(cs);
cs 352 drivers/misc/habanalabs/command_submission.c static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
cs 356 drivers/misc/habanalabs/command_submission.c list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
cs 362 drivers/misc/habanalabs/command_submission.c struct hl_cs *cs, *tmp;
cs 368 drivers/misc/habanalabs/command_submission.c list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
cs 370 drivers/misc/habanalabs/command_submission.c cs_get(cs);
cs 371 drivers/misc/habanalabs/command_submission.c cs->aborted = true;
cs 373 drivers/misc/habanalabs/command_submission.c cs->ctx->asid, cs->sequence);
cs 374 drivers/misc/habanalabs/command_submission.c cs_rollback(hdev, cs);
cs 375 drivers/misc/habanalabs/command_submission.c cs_put(cs);
cs 383 drivers/misc/habanalabs/command_submission.c struct hl_cs *cs = job->cs;
cs 384 drivers/misc/habanalabs/command_submission.c struct hl_device *hdev = cs->ctx->hdev;
cs 471 drivers/misc/habanalabs/command_submission.c struct hl_cs *cs;
cs 504 drivers/misc/habanalabs/command_submission.c rc = allocate_cs(hdev, hpriv->ctx, &cs);
cs 510 drivers/misc/habanalabs/command_submission.c *cs_seq = cs->sequence;
cs 512 drivers/misc/habanalabs/command_submission.c hl_debugfs_add_cs(cs);
cs 540 drivers/misc/habanalabs/command_submission.c job->cs = cs;
cs 549 drivers/misc/habanalabs/command_submission.c cs->jobs_in_queue_cnt[job->hw_queue_id]++;
cs 551 drivers/misc/habanalabs/command_submission.c list_add_tail(&job->cs_node, &cs->job_list);
cs 560 drivers/misc/habanalabs/command_submission.c cs_get(cs);
cs 568 drivers/misc/habanalabs/command_submission.c cs->ctx->asid, cs->sequence, job->id, rc);
cs 576 drivers/misc/habanalabs/command_submission.c cs->ctx->asid, cs->sequence);
cs 581 drivers/misc/habanalabs/command_submission.c rc = hl_hw_queue_schedule_cs(cs);
cs 585 drivers/misc/habanalabs/command_submission.c cs->ctx->asid, cs->sequence, rc);
cs 598 drivers/misc/habanalabs/command_submission.c cs_rollback(hdev, cs);
cs 603 drivers/misc/habanalabs/command_submission.c cs_put(cs);
cs 131 drivers/misc/habanalabs/debugfs.c struct hl_cs *cs;
cs 136 drivers/misc/habanalabs/debugfs.c list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
cs 145 drivers/misc/habanalabs/debugfs.c cs->sequence, cs->ctx->asid,
cs 146 drivers/misc/habanalabs/debugfs.c kref_read(&cs->refcount),
cs 147 drivers/misc/habanalabs/debugfs.c cs->submitted, cs->completed);
cs 174 drivers/misc/habanalabs/debugfs.c if (job->cs)
cs 177 drivers/misc/habanalabs/debugfs.c job->id, job->cs->sequence, job->cs->ctx->asid,
cs 1090 drivers/misc/habanalabs/debugfs.c void hl_debugfs_add_cs(struct hl_cs *cs)
cs 1092 drivers/misc/habanalabs/debugfs.c struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
cs 1095 drivers/misc/habanalabs/debugfs.c list_add(&cs->debugfs_list, &dev_entry->cs_list);
cs 1099 drivers/misc/habanalabs/debugfs.c void hl_debugfs_remove_cs(struct hl_cs *cs)
cs 1101 drivers/misc/habanalabs/debugfs.c struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
cs 1104 drivers/misc/habanalabs/debugfs.c list_del(&cs->debugfs_list);
cs 763 drivers/misc/habanalabs/habanalabs.h struct hl_cs *cs;
cs 1436 drivers/misc/habanalabs/habanalabs.h int hl_hw_queue_schedule_cs(struct hl_cs *cs);
cs 1439 drivers/misc/habanalabs/habanalabs.h void hl_int_hw_queue_update_ci(struct hl_cs *cs);
cs 1580 drivers/misc/habanalabs/habanalabs.h void hl_debugfs_add_cs(struct hl_cs *cs);
cs 1581 drivers/misc/habanalabs/habanalabs.h void hl_debugfs_remove_cs(struct hl_cs *cs);
cs 1624 drivers/misc/habanalabs/habanalabs.h static inline void hl_debugfs_add_cs(struct hl_cs *cs)
cs 1628 drivers/misc/habanalabs/habanalabs.h static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
cs 37 drivers/misc/habanalabs/hw_queue.c void hl_int_hw_queue_update_ci(struct hl_cs *cs)
cs 39 drivers/misc/habanalabs/hw_queue.c struct hl_device *hdev = cs->ctx->hdev;
cs 51 drivers/misc/habanalabs/hw_queue.c q->ci += cs->jobs_in_queue_cnt[i];
cs 232 drivers/misc/habanalabs/hw_queue.c struct hl_device *hdev = job->cs->ctx->hdev;
cs 290 drivers/misc/habanalabs/hw_queue.c struct hl_device *hdev = job->cs->ctx->hdev;
cs 316 drivers/misc/habanalabs/hw_queue.c int hl_hw_queue_schedule_cs(struct hl_cs *cs)
cs 318 drivers/misc/habanalabs/hw_queue.c struct hl_device *hdev = cs->ctx->hdev;
cs 336 drivers/misc/habanalabs/hw_queue.c if (cs->jobs_in_queue_cnt[i]) {
cs 338 drivers/misc/habanalabs/hw_queue.c cs->jobs_in_queue_cnt[i], true);
cs 344 drivers/misc/habanalabs/hw_queue.c if (cs->jobs_in_queue_cnt[i]) {
cs 346 drivers/misc/habanalabs/hw_queue.c cs->jobs_in_queue_cnt[i]);
cs 354 drivers/misc/habanalabs/hw_queue.c list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
cs 359 drivers/misc/habanalabs/hw_queue.c struct hl_cs, mirror_node) == cs)) {
cs 360 drivers/misc/habanalabs/hw_queue.c cs->tdr_active = true;
cs 361 drivers/misc/habanalabs/hw_queue.c schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
cs 375 drivers/misc/habanalabs/hw_queue.c list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
cs 381 drivers/misc/habanalabs/hw_queue.c cs->submitted = true;
cs 390 drivers/misc/habanalabs/hw_queue.c (cs->jobs_in_queue_cnt[i])) {
cs 393 drivers/misc/habanalabs/hw_queue.c atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
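The habanalabs entries above manage command-submission lifetime with a kref: every holder takes a reference, the timeout handler uses kref_get_unless_zero() so it never revives an object whose count already hit zero, and the final put runs the release callback. A minimal sketch of that pattern; struct my_cs stands in for struct hl_cs:

        #include <linux/kref.h>
        #include <linux/slab.h>

        struct my_cs {
                struct kref refcount;
                /* ... job list, fence, state flags ... */
        };

        /* Called exactly once, when the last reference is dropped. */
        static void cs_do_release(struct kref *ref)
        {
                struct my_cs *cs = container_of(ref, struct my_cs, refcount);

                kfree(cs);
        }

        static void cs_get(struct my_cs *cs)
        {
                kref_get(&cs->refcount);
        }

        /* Safe from async contexts: fails instead of resurrecting a dying cs. */
        static int cs_get_unless_zero(struct my_cs *cs)
        {
                return kref_get_unless_zero(&cs->refcount);
        }

        static void cs_put(struct my_cs *cs)
        {
                kref_put(&cs->refcount, cs_do_release);
        }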
cs 684 drivers/mmc/host/mvsdio.c const struct mbus_dram_window *cs = dram->cs + i;
cs 685 drivers/mmc/host/mvsdio.c writel(((cs->size - 1) & 0xffff0000) |
cs 686 drivers/mmc/host/mvsdio.c (cs->mbus_attr << 8) |
cs 689 drivers/mmc/host/mvsdio.c writel(cs->base, iobase + MVSD_WINDOW_BASE(i));
cs 102 drivers/mmc/host/sdhci-pxav3.c const struct mbus_dram_window *cs = dram->cs + i;
cs 105 drivers/mmc/host/sdhci-pxav3.c writel(((cs->size - 1) & 0xffff0000) |
cs 106 drivers/mmc/host/sdhci-pxav3.c (cs->mbus_attr << 8) |
cs 110 drivers/mmc/host/sdhci-pxav3.c writel(cs->base, regs + SDHCI_WINDOW_BASE(i));
cs 46 drivers/mtd/maps/nettel.c #define SC520_PAR(cs, addr, size) \
cs 47 drivers/mtd/maps/nettel.c ((cs) | \
cs 30 drivers/mtd/maps/pismo.c struct pismo_cs_block cs[PISMO_NUM_CS];
cs 156 drivers/mtd/maps/pismo.c const struct pismo_cs_block *cs, phys_addr_t base)
cs 162 drivers/mtd/maps/pismo.c region.type = cs->type;
cs 163 drivers/mtd/maps/pismo.c region.width = pismo_width_to_bytes(cs->width);
cs 164 drivers/mtd/maps/pismo.c region.access = le16_to_cpu(cs->access);
cs 165 drivers/mtd/maps/pismo.c region.size = le32_to_cpu(cs->size);
cs 168 drivers/mtd/maps/pismo.c dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width);
cs 179 drivers/mtd/maps/pismo.c i, cs->device, region.type, region.access, region.size / 1024);
cs 243 drivers/mtd/maps/pismo.c for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++)
cs 244 drivers/mtd/maps/pismo.c if (eeprom.cs[i].type != 0xff)
cs 245 drivers/mtd/maps/pismo.c pismo_add_one(pismo, i, &eeprom.cs[i],
cs 117 drivers/mtd/nand/raw/atmel/nand-controller.c #define ATMEL_NFC_CSID(cs) ((cs) << 22)
cs 168 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_cs cs[];
cs 183 drivers/mtd/nand/raw/atmel/nand-controller.c u8 cs;
cs 489 drivers/mtd/nand/raw/atmel/nand-controller.c static void atmel_nand_select_chip(struct nand_chip *chip, int cs)
cs 493 drivers/mtd/nand/raw/atmel/nand-controller.c if (cs < 0 || cs >= nand->numcs) {
cs 499 drivers/mtd/nand/raw/atmel/nand-controller.c nand->activecs = &nand->cs[cs];
cs 518 drivers/mtd/nand/raw/atmel/nand-controller.c static void atmel_hsmc_nand_select_chip(struct nand_chip *chip, int cs)
cs 526 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nand_select_chip(chip, cs);
cs 564 drivers/mtd/nand/raw/atmel/nand-controller.c op |= ATMEL_NFC_CSID(nc->op.cs) |
cs 620 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cs = nand->activecs->id;
cs 919 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cs = nand->activecs->id;
cs 946 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cs = nand->activecs->id;
cs 1004 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cs = nand->activecs->id;
cs 1406 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_cs *cs;
cs 1418 drivers/mtd/nand/raw/atmel/nand-controller.c cs = &nand->cs[csline];
cs 1419 drivers/mtd/nand/raw/atmel/nand-controller.c cs->smcconf = smcconf;
cs 1420 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
cs 1431 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_cs *cs;
cs 1443 drivers/mtd/nand/raw/atmel/nand-controller.c cs = &nand->cs[csline];
cs 1444 drivers/mtd/nand/raw/atmel/nand-controller.c cs->smcconf = smcconf;
cs 1446 drivers/mtd/nand/raw/atmel/nand-controller.c if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
cs 1447 drivers/mtd/nand/raw/atmel/nand-controller.c cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
cs 1449 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
cs 1450 drivers/mtd/nand/raw/atmel/nand-controller.c &cs->smcconf);
cs 1521 drivers/mtd/nand/raw/atmel/nand-controller.c BIT(nand->cs[i].id), BIT(nand->cs[i].id));
cs 1573 drivers/mtd/nand/raw/atmel/nand-controller.c nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
cs 1613 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].id = val;
cs 1615 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].io.dma = res.start;
cs 1616 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
cs 1617 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nand->cs[i].io.virt))
cs 1618 drivers/mtd/nand/raw/atmel/nand-controller.c return ERR_CAST(nand->cs[i].io.virt);
cs 1624 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
cs 1625 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].rb.id = val;
cs 1638 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
cs 1639 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].rb.gpio = gpio;
cs 1655 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].csgpio = gpio;
cs 1725 drivers/mtd/nand/raw/atmel/nand-controller.c nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
cs 1733 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
cs 1734 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nand->cs[0].io.virt))
cs 1735 drivers/mtd/nand/raw/atmel/nand-controller.c return PTR_ERR(nand->cs[0].io.virt);
cs 1737 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[0].io.dma = res->start;
cs 1747 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[0].id = 3;
cs 1758 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
cs 1759 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[0].rb.gpio = gpio;
cs 1770 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[0].csgpio = gpio;
cs 1947 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[0].id);
cs 21 drivers/mtd/nand/raw/au1550nd.c int cs;
cs 197 drivers/mtd/nand/raw/au1550nd.c alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
cs 383 drivers/mtd/nand/raw/au1550nd.c int ret, cs;
cs 419 drivers/mtd/nand/raw/au1550nd.c cs = find_nand_cs(r->start);
cs 420 drivers/mtd/nand/raw/au1550nd.c if (cs < 0) {
cs 425 drivers/mtd/nand/raw/au1550nd.c ctx->cs = cs;
cs 190 drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c int cs)
cs 78 drivers/mtd/nand/raw/brcmnand/brcmnand.c u32 cs;
cs 234 drivers/mtd/nand/raw/brcmnand/brcmnand.c int cs;
cs 687 drivers/mtd/nand/raw/brcmnand/brcmnand.c (host->cs << 16) | ((addr >> 32) & 0xffff));
cs 694 drivers/mtd/nand/raw/brcmnand/brcmnand.c static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
cs 701 drivers/mtd/nand/raw/brcmnand/brcmnand.c if (cs == 0 && ctrl->cs0_offsets)
cs 706 drivers/mtd/nand/raw/brcmnand/brcmnand.c if (cs && offs_cs1)
cs 707 drivers/mtd/nand/raw/brcmnand/brcmnand.c return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
cs 709 drivers/mtd/nand/raw/brcmnand/brcmnand.c return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
cs 724 drivers/mtd/nand/raw/brcmnand/brcmnand.c int cs = host->cs;
cs 736 drivers/mtd/nand/raw/brcmnand/brcmnand.c if (cs >= 4)
cs 738 drivers/mtd/nand/raw/brcmnand/brcmnand.c shift = (cs % 4) * bits;
cs 740 drivers/mtd/nand/raw/brcmnand/brcmnand.c if (cs >= 5)
cs 742 drivers/mtd/nand/raw/brcmnand/brcmnand.c shift = (cs % 5) * bits;
cs 807 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
cs 839 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
cs 852 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
cs 1661 drivers/mtd/nand/raw/brcmnand/brcmnand.c desc->cs = host->cs;
cs 2077 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
cs 2078 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
cs 2080 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
cs 2316 drivers/mtd/nand/raw/brcmnand/brcmnand.c offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
cs 2376 drivers/mtd/nand/raw/brcmnand/brcmnand.c ret = of_property_read_u32(dn, "reg", &host->cs);
cs 2388 drivers/mtd/nand/raw/brcmnand/brcmnand.c host->cs);
cs 2419 drivers/mtd/nand/raw/brcmnand/brcmnand.c cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
cs 2438 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
cs 2439 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
cs 2441 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
cs 2443 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
cs 2444 drivers/mtd/nand/raw/brcmnand/brcmnand.c u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
cs 171 drivers/mtd/nand/raw/cs553x_nand.c static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
cs 178 drivers/mtd/nand/raw/cs553x_nand.c cs, mmio ? "MM" : "P", adr);
cs 224 drivers/mtd/nand/raw/cs553x_nand.c new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
cs 235 drivers/mtd/nand/raw/cs553x_nand.c cs553x_mtd[cs] = new_mtd;
cs 193 drivers/mtd/nand/raw/denali.c static void denali_select_target(struct nand_chip *chip, int cs)
cs 196 drivers/mtd/nand/raw/denali.c struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs];
cs 1134 drivers/mtd/nand/raw/denali.c denali_select_target(chip, op->cs);
cs 532 drivers/mtd/nand/raw/fsl_elbc_nand.c static void fsl_elbc_select_chip(struct nand_chip *chip, int cs)
cs 499 drivers/mtd/nand/raw/fsl_ifc_nand.c static void fsl_ifc_select_chip(struct nand_chip *chip, int cs)
cs 759 drivers/mtd/nand/raw/fsl_ifc_nand.c uint32_t cs = priv->bank;
cs 783 drivers/mtd/nand/raw/fsl_ifc_nand.c csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
cs 784 drivers/mtd/nand/raw/fsl_ifc_nand.c csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
cs 788 drivers/mtd/nand/raw/fsl_ifc_nand.c ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
cs 789 drivers/mtd/nand/raw/fsl_ifc_nand.c ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);
cs 807 drivers/mtd/nand/raw/fsl_ifc_nand.c ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
cs 824 drivers/mtd/nand/raw/fsl_ifc_nand.c ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
cs 825 drivers/mtd/nand/raw/fsl_ifc_nand.c ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
cs 839 drivers/mtd/nand/raw/hisi504_nand.c int cs;
cs 843 drivers/mtd/nand/raw/hisi504_nand.c for (cs = 0; cs < nanddev_ntargets(&chip->base); cs++)
cs 844 drivers/mtd/nand/raw/hisi504_nand.c hisi_nfc_send_cmd_reset(host, cs);
cs 53 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c struct ingenic_nand_cs cs[];
cs 149 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c struct ingenic_nand_cs *cs;
cs 153 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c cs = &nfc->cs[nfc->selected];
cs 154 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c jz4780_nemc_assert(nfc->dev, cs->bank, false);
cs 165 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c struct ingenic_nand_cs *cs;
cs 170 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c cs = &nfc->cs[nfc->selected];
cs 172 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
cs 178 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c writeb(cmd, cs->base + nfc->soc_info->addr_offset);
cs 180 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c writeb(cmd, cs->base + nfc->soc_info->cmd_offset);
cs 312 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c struct ingenic_nand_cs *cs;
cs 318 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c cs = &nfc->cs[chipnr];
cs 324 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c cs->bank = be32_to_cpu(*reg);
cs 326 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
cs 328 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c cs->base = devm_platform_ioremap_resource(pdev, chipnr);
cs 329 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c if (IS_ERR(cs->base))
cs 330 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c return PTR_ERR(cs->base);
cs 357 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c cs->bank);
cs 362 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c chip->legacy.IO_ADDR_R = cs->base + nfc->soc_info->data_offset;
cs 363
drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c chip->legacy.IO_ADDR_W = cs->base + nfc->soc_info->data_offset; cs 442 drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL); cs 114 drivers/mtd/nand/raw/internals.h if (WARN_ON(op->cs >= nanddev_ntargets(&chip->base))) cs 171 drivers/mtd/nand/raw/marvell_nand.c #define NDSR_CMDD(cs) BIT(8 - cs) cs 309 drivers/mtd/nand/raw/marvell_nand.c unsigned int cs; cs 2110 drivers/mtd/nand/raw/marvell_nand.c marvell_nfc_select_target(chip, op->cs); cs 2498 drivers/mtd/nand/raw/marvell_nand.c marvell_nand->sels[0].cs); cs 2522 drivers/mtd/nand/raw/marvell_nand.c u32 cs, rb; cs 2561 drivers/mtd/nand/raw/marvell_nand.c cs = i; cs 2564 drivers/mtd/nand/raw/marvell_nand.c ret = of_property_read_u32_index(np, "reg", i, &cs); cs 2572 drivers/mtd/nand/raw/marvell_nand.c if (cs >= nfc->caps->max_cs_nb) { cs 2574 drivers/mtd/nand/raw/marvell_nand.c cs, nfc->caps->max_cs_nb); cs 2578 drivers/mtd/nand/raw/marvell_nand.c if (test_and_set_bit(cs, &nfc->assigned_cs)) { cs 2579 drivers/mtd/nand/raw/marvell_nand.c dev_err(dev, "CS %d already assigned\n", cs); cs 2591 drivers/mtd/nand/raw/marvell_nand.c marvell_nand->sels[i].cs = cs; cs 2592 drivers/mtd/nand/raw/marvell_nand.c switch (cs) { cs 581 drivers/mtd/nand/raw/meson_nand.c u32 cs = nfc->param.chip_select; cs 588 drivers/mtd/nand/raw/meson_nand.c nfc->cmdfifo.rw.cmd0 = cs | NFC_CMD_CLE | cmd0; cs 590 drivers/mtd/nand/raw/meson_nand.c addrs[0] = cs | NFC_CMD_ALE | 0; cs 595 drivers/mtd/nand/raw/meson_nand.c addrs[1] = cs | NFC_CMD_ALE | 0; cs 599 drivers/mtd/nand/raw/meson_nand.c addrs[row_start] = cs | NFC_CMD_ALE | ROW_ADDER(page, 0); cs 600 drivers/mtd/nand/raw/meson_nand.c addrs[row_start + 1] = cs | NFC_CMD_ALE | ROW_ADDER(page, 1); cs 604 drivers/mtd/nand/raw/meson_nand.c cs | NFC_CMD_ALE | ROW_ADDER(page, 2); cs 616 drivers/mtd/nand/raw/meson_nand.c nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART; cs 902 drivers/mtd/nand/raw/meson_nand.c meson_nfc_select_chip(nand, op->cs); cs 139 drivers/mtd/nand/raw/mxc_nand.c void (*select_chip)(struct nand_chip *chip, int cs); cs 236 drivers/mtd/nand/raw/nand_base.c void nand_select_target(struct nand_chip *chip, unsigned int cs) cs 242 drivers/mtd/nand/raw/nand_base.c if (WARN_ON(cs > nanddev_ntargets(&chip->base))) cs 245 drivers/mtd/nand/raw/nand_base.c chip->cur_cs = cs; cs 248 drivers/mtd/nand/raw/nand_base.c chip->legacy.select_chip(chip, cs); cs 190 drivers/mtd/nand/raw/ndfc.c u32 cs; cs 200 drivers/mtd/nand/raw/ndfc.c cs = be32_to_cpu(reg[0]); cs 201 drivers/mtd/nand/raw/ndfc.c if (cs >= NDFC_MAX_CS) { cs 202 drivers/mtd/nand/raw/ndfc.c dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs); cs 206 drivers/mtd/nand/raw/ndfc.c ndfc = &ndfc_ctrl[cs]; cs 207 drivers/mtd/nand/raw/ndfc.c ndfc->chip_select = cs; cs 189 drivers/mtd/nand/raw/omap2.c static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode, cs 206 drivers/mtd/nand/raw/omap2.c val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) | cs 220 drivers/mtd/nand/raw/omap2.c static int omap_prefetch_reset(int cs, struct omap_nand_info *info) cs 226 drivers/mtd/nand/raw/omap2.c if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs) cs 1755 drivers/mtd/nand/raw/omap2.c u32 cs; cs 1757 drivers/mtd/nand/raw/omap2.c if (of_property_read_u32(child, "reg", &cs) < 0) { cs 1762 drivers/mtd/nand/raw/omap2.c info->gpmc_cs = cs; cs 438 drivers/mtd/nand/raw/qcom_nandc.c int cs; cs 2782 drivers/mtd/nand/raw/qcom_nandc.c ret = of_property_read_u32(dn, 
"reg", &host->cs); cs 2789 drivers/mtd/nand/raw/qcom_nandc.c mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs); cs 1368 drivers/mtd/nand/raw/stm32_fmc2_nand.c ret = stm32_fmc2_select_chip(chip, op->cs); cs 1794 drivers/mtd/nand/raw/stm32_fmc2_nand.c u32 cs; cs 1807 drivers/mtd/nand/raw/stm32_fmc2_nand.c ret = of_property_read_u32_index(dn, "reg", i, &cs); cs 1814 drivers/mtd/nand/raw/stm32_fmc2_nand.c if (cs > FMC2_MAX_CE) { cs 1815 drivers/mtd/nand/raw/stm32_fmc2_nand.c dev_err(fmc2->dev, "invalid reg value: %d\n", cs); cs 1819 drivers/mtd/nand/raw/stm32_fmc2_nand.c if (fmc2->cs_assigned & BIT(cs)) { cs 1820 drivers/mtd/nand/raw/stm32_fmc2_nand.c dev_err(fmc2->dev, "cs already assigned: %d\n", cs); cs 1824 drivers/mtd/nand/raw/stm32_fmc2_nand.c fmc2->cs_assigned |= BIT(cs); cs 1825 drivers/mtd/nand/raw/stm32_fmc2_nand.c nand->cs_used[i] = cs; cs 167 drivers/mtd/nand/raw/sunxi_nand.c u8 cs; cs 405 drivers/mtd/nand/raw/sunxi_nand.c static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs) cs 413 drivers/mtd/nand/raw/sunxi_nand.c if (cs > 0 && cs >= sunxi_nand->nsels) cs 419 drivers/mtd/nand/raw/sunxi_nand.c sel = &sunxi_nand->sels[cs]; cs 420 drivers/mtd/nand/raw/sunxi_nand.c ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift); cs 1910 drivers/mtd/nand/raw/sunxi_nand.c sunxi_nfc_select_chip(nand, op->cs); cs 1912 drivers/mtd/nand/raw/sunxi_nand.c if (sunxi_nand->sels[op->cs].rb >= 0) cs 1975 drivers/mtd/nand/raw/sunxi_nand.c sunxi_nand->sels[i].cs = tmp; cs 107 drivers/mtd/nand/raw/tango_nand.c #define XFER_CFG(cs, page_count, steps, metadata_size) \ cs 108 drivers/mtd/nand/raw/tango_nand.c ((cs) << 24 | (page_count) << 16 | (steps) << 8 | (metadata_size)) cs 535 drivers/mtd/nand/raw/tango_nand.c u32 cs; cs 554 drivers/mtd/nand/raw/tango_nand.c err = of_property_read_u32_index(np, "reg", 0, &cs); cs 558 drivers/mtd/nand/raw/tango_nand.c if (cs >= MAX_CS) cs 575 drivers/mtd/nand/raw/tango_nand.c tchip->base = nfc->pbus_base + (cs * 256); cs 585 drivers/mtd/nand/raw/tango_nand.c tchip->xfer_cfg = XFER_CFG(cs, 1, ecc->steps, METADATA_SIZE); cs 596 drivers/mtd/nand/raw/tango_nand.c nfc->chips[cs] = tchip; cs 603 drivers/mtd/nand/raw/tango_nand.c int cs; cs 608 drivers/mtd/nand/raw/tango_nand.c for (cs = 0; cs < MAX_CS; ++cs) { cs 609 drivers/mtd/nand/raw/tango_nand.c if (nfc->chips[cs]) cs 610 drivers/mtd/nand/raw/tango_nand.c nand_release(&nfc->chips[cs]->nand_chip); cs 186 drivers/mtd/nand/raw/tegra_nand.c int cs[1]; cs 463 drivers/mtd/nand/raw/tegra_nand.c ctrl->cur_cs = nand->cs[die_nr]; cs 470 drivers/mtd/nand/raw/tegra_nand.c tegra_nand_select_target(chip, op->cs); cs 1067 drivers/mtd/nand/raw/tegra_nand.c u32 cs; cs 1083 drivers/mtd/nand/raw/tegra_nand.c ret = of_property_read_u32(np_nand, "reg", &cs); cs 1093 drivers/mtd/nand/raw/tegra_nand.c nand->cs[0] = cs; cs 63 drivers/mtd/nand/raw/txx9ndfmc.c int cs; cs 146 drivers/mtd/nand/raw/txx9ndfmc.c if (txx9_priv->cs >= 0 && (ctrl & NAND_NCE)) { cs 148 drivers/mtd/nand/raw/txx9ndfmc.c mcr |= TXX9_NDFMCR_CS(txx9_priv->cs); cs 341 drivers/mtd/nand/raw/txx9ndfmc.c txx9_priv->cs = i; cs 345 drivers/mtd/nand/raw/txx9ndfmc.c txx9_priv->cs = -1; cs 484 drivers/mtd/nand/raw/vf610_nfc.c static void vf610_nfc_select_target(struct nand_chip *chip, unsigned int cs) cs 496 drivers/mtd/nand/raw/vf610_nfc.c tmp |= BIT(cs) << ROW_ADDR_CHIP_SEL_SHIFT; cs 505 drivers/mtd/nand/raw/vf610_nfc.c vf610_nfc_select_target(chip, op->cs); cs 157 drivers/mtd/nand/raw/xway_nand.c u32 cs; cs 190 
drivers/mtd/nand/raw/xway_nand.c err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs); cs 191 drivers/mtd/nand/raw/xway_nand.c if (!err && cs == 1) cs 93 drivers/mtd/spi-nor/aspeed-smc.c int cs; cs 197 drivers/mtd/spi-nor/aspeed-smc.c #define SEGMENT_ADDR_REG(controller, cs) \ cs 198 drivers/mtd/spi-nor/aspeed-smc.c ((controller)->regs + SEGMENT_ADDR_REG0 + (cs) * 4) cs 258 drivers/mtd/spi-nor/aspeed-smc.c return BIT(chip->controller->info->we0 + chip->cs); cs 444 drivers/mtd/spi-nor/aspeed-smc.c reg = readl(SEGMENT_ADDR_REG(controller, chip->cs)); cs 462 drivers/mtd/spi-nor/aspeed-smc.c static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start, cs 471 drivers/mtd/spi-nor/aspeed-smc.c seg_reg = SEGMENT_ADDR_REG(controller, cs); cs 489 drivers/mtd/spi-nor/aspeed-smc.c cs, size >> 20); cs 502 drivers/mtd/spi-nor/aspeed-smc.c dev_err(chip->nor.dev, "CE%d window invalid", cs); cs 510 drivers/mtd/spi-nor/aspeed-smc.c cs, start, end, size >> 20); cs 550 drivers/mtd/spi-nor/aspeed-smc.c if (chip->cs == 0 && controller->info == &spi_2500_info && cs 555 drivers/mtd/spi-nor/aspeed-smc.c chip->cs, size >> 20); cs 565 drivers/mtd/spi-nor/aspeed-smc.c if (chip->cs) { cs 566 drivers/mtd/spi-nor/aspeed-smc.c u32 prev = readl(SEGMENT_ADDR_REG(controller, chip->cs - 1)); cs 573 drivers/mtd/spi-nor/aspeed-smc.c size = chip_set_segment(chip, chip->cs, start, size); cs 583 drivers/mtd/spi-nor/aspeed-smc.c if (chip->cs < controller->info->nce - 1) cs 584 drivers/mtd/spi-nor/aspeed-smc.c chip_set_segment(chip, chip->cs + 1, start + size, 0); cs 590 drivers/mtd/spi-nor/aspeed-smc.c chip->cs, (u32)chip->nor.mtd.size >> 20); cs 614 drivers/mtd/spi-nor/aspeed-smc.c reg &= ~(3 << (chip->cs * 2)); cs 615 drivers/mtd/spi-nor/aspeed-smc.c reg |= chip->type << (chip->cs * 2); cs 630 drivers/mtd/spi-nor/aspeed-smc.c reg |= 1 << chip->cs; cs 667 drivers/mtd/spi-nor/aspeed-smc.c dev_warn(chip->nor.dev, "CE%d window closed", chip->cs); cs 760 drivers/mtd/spi-nor/aspeed-smc.c unsigned int cs; cs 772 drivers/mtd/spi-nor/aspeed-smc.c ret = of_property_read_u32(child, "reg", &cs); cs 778 drivers/mtd/spi-nor/aspeed-smc.c if (cs >= info->nce) { cs 780 drivers/mtd/spi-nor/aspeed-smc.c cs); cs 785 drivers/mtd/spi-nor/aspeed-smc.c if (controller->chips[cs]) { cs 787 drivers/mtd/spi-nor/aspeed-smc.c cs, dev_name(controller->chips[cs]->nor.dev)); cs 799 drivers/mtd/spi-nor/aspeed-smc.c chip->ctl = controller->regs + info->ctl0 + cs * 4; cs 800 drivers/mtd/spi-nor/aspeed-smc.c chip->cs = cs; cs 836 drivers/mtd/spi-nor/aspeed-smc.c controller->chips[cs] = chip; cs 58 drivers/mtd/spi-nor/cadence-quadspi.c u8 cs; cs 714 drivers/mtd/spi-nor/cadence-quadspi.c unsigned int chip_select = f_pdata->cs; cs 867 drivers/mtd/spi-nor/cadence-quadspi.c int switch_cs = (cqspi->current_cs != f_pdata->cs); cs 880 drivers/mtd/spi-nor/cadence-quadspi.c cqspi->current_cs = f_pdata->cs; cs 1228 drivers/mtd/spi-nor/cadence-quadspi.c unsigned int cs; cs 1240 drivers/mtd/spi-nor/cadence-quadspi.c ret = of_property_read_u32(np, "reg", &cs); cs 1246 drivers/mtd/spi-nor/cadence-quadspi.c if (cs >= CQSPI_MAX_CHIPSELECT) { cs 1248 drivers/mtd/spi-nor/cadence-quadspi.c dev_err(dev, "Chip select %d out of range.\n", cs); cs 1252 drivers/mtd/spi-nor/cadence-quadspi.c f_pdata = &cqspi->f_pdata[cs]; cs 1254 drivers/mtd/spi-nor/cadence-quadspi.c f_pdata->cs = cs; cs 1278 drivers/mtd/spi-nor/cadence-quadspi.c dev_name(dev), cs); cs 46 drivers/mtd/spi-nor/hisi-sfc.c #define OP_CFG_FM_CS(cs) ((cs) << 11) cs 116 drivers/mtd/tests/oobtest.c static 
size_t memcmpshowoffset(loff_t addr, loff_t offset, const void *cs, cs 124 drivers/mtd/tests/oobtest.c for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) { cs 137 drivers/mtd/tests/oobtest.c #define memcmpshow(addr, cs, ct, count) memcmpshowoffset((addr), 0, (cs), (ct),\ cs 144 drivers/mtd/tests/oobtest.c static size_t memffshow(loff_t addr, loff_t offset, const void *cs, cs 152 drivers/mtd/tests/oobtest.c for (su1 = cs; 0 < count; ++su1, count--, i++) { cs 773 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c struct aq_stats_s *cs = &self->curr_stats; cs 801 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self); cs 802 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self); cs 803 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self); cs 804 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self); cs 1194 drivers/net/ethernet/cavium/liquidio/octeon_device.c struct octeon_core_setup *cs = NULL; cs 1239 drivers/net/ethernet/cavium/liquidio/octeon_device.c cs = &core_setup[oct->octeon_id]; cs 1241 drivers/net/ethernet/cavium/liquidio/octeon_device.c if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) { cs 1243 drivers/net/ethernet/cavium/liquidio/octeon_device.c (u32)sizeof(*cs), cs 1247 drivers/net/ethernet/cavium/liquidio/octeon_device.c memcpy(cs, get_rbd( cs 1248 drivers/net/ethernet/cavium/liquidio/octeon_device.c recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs)); cs 1250 drivers/net/ethernet/cavium/liquidio/octeon_device.c strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME); cs 1251 drivers/net/ethernet/cavium/liquidio/octeon_device.c strncpy(oct->boardinfo.serial_number, cs->board_serial_number, cs 1254 drivers/net/ethernet/cavium/liquidio/octeon_device.c octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3)); cs 1256 drivers/net/ethernet/cavium/liquidio/octeon_device.c oct->boardinfo.major = cs->board_rev_major; cs 1257 drivers/net/ethernet/cavium/liquidio/octeon_device.c oct->boardinfo.minor = cs->board_rev_minor; cs 1261 drivers/net/ethernet/cavium/liquidio/octeon_device.c app_name, CVM_CAST64(cs->corefreq)); cs 546 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, cs 552 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); cs 553 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c cs->cam_mask |= (1ULL << cs->cam_index); cs 554 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c cs->cam_index++; cs 2638 drivers/net/ethernet/marvell/mv643xx_eth.c const struct mbus_dram_window *cs = dram->cs + i; cs 2640 drivers/net/ethernet/marvell/mv643xx_eth.c writel((cs->base & 0xffff0000) | cs 2641 drivers/net/ethernet/marvell/mv643xx_eth.c (cs->mbus_attr << 8) | cs 2643 drivers/net/ethernet/marvell/mv643xx_eth.c writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); cs 4439 drivers/net/ethernet/marvell/mvneta.c const struct mbus_dram_window *cs = dram->cs + i; cs 4442 drivers/net/ethernet/marvell/mvneta.c (cs->base & 0xffff0000) | cs 4443 drivers/net/ethernet/marvell/mvneta.c (cs->mbus_attr << 8) | cs 4447 drivers/net/ethernet/marvell/mvneta.c (cs->size - 1) & 0xffff0000); cs 5478 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c const struct 
mbus_dram_window *cs = dram->cs + i; cs 5481 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | cs 5485 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c (cs->size - 1) & 0xffff0000); cs 342 drivers/net/ethernet/pasemi/pasemi_mac.c mac->cs[0] = pasemi_mac_setup_csring(mac); cs 344 drivers/net/ethernet/pasemi/pasemi_mac.c mac->cs[1] = pasemi_mac_setup_csring(mac); cs 346 drivers/net/ethernet/pasemi/pasemi_mac.c mac->cs[1] = 0; cs 349 drivers/net/ethernet/pasemi/pasemi_mac.c if (mac->cs[i]) cs 1323 drivers/net/ethernet/pasemi/pasemi_mac.c pasemi_mac_free_csring(mac->cs[i]); cs 1324 drivers/net/ethernet/pasemi/pasemi_mac.c mac->cs[i] = NULL; cs 1494 drivers/net/ethernet/pasemi/pasemi_mac.c csring = mac->cs[mac->last_cs]; cs 78 drivers/net/ethernet/pasemi/pasemi_mac.h struct pasemi_mac_csring *cs[MAX_CS]; cs 99 drivers/net/ethernet/pasemi/pasemi_mac.h #define CS_DESC(cs, num) ((cs)->chan.ring_virt[(num) & (CS_RING_SIZE-1)]) cs 3589 drivers/net/ethernet/sun/niu.c u64 cs; cs 3594 drivers/net/ethernet/sun/niu.c cs = rp->tx_cs; cs 3595 drivers/net/ethernet/sun/niu.c if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) cs 3598 drivers/net/ethernet/sun/niu.c tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; cs 3839 drivers/net/ethernet/sun/niu.c u64 cs) cs 3843 drivers/net/ethernet/sun/niu.c if (cs & TX_CS_MBOX_ERR) cs 3845 drivers/net/ethernet/sun/niu.c if (cs & TX_CS_PKT_SIZE_ERR) cs 3847 drivers/net/ethernet/sun/niu.c if (cs & TX_CS_TX_RING_OFLOW) cs 3849 drivers/net/ethernet/sun/niu.c if (cs & TX_CS_PREF_BUF_PAR_ERR) cs 3851 drivers/net/ethernet/sun/niu.c if (cs & TX_CS_NACK_PREF) cs 3853 drivers/net/ethernet/sun/niu.c if (cs & TX_CS_NACK_PKT_RD) cs 3855 drivers/net/ethernet/sun/niu.c if (cs & TX_CS_CONF_PART_ERR) cs 3857 drivers/net/ethernet/sun/niu.c if (cs & TX_CS_PKT_PRT_ERR) cs 3865 drivers/net/ethernet/sun/niu.c u64 cs, logh, logl; cs 3867 drivers/net/ethernet/sun/niu.c cs = nr64(TX_CS(rp->tx_channel)); cs 3873 drivers/net/ethernet/sun/niu.c (unsigned long long)cs, cs 3877 drivers/net/ethernet/sun/niu.c niu_log_txchan_errors(np, rp, cs); cs 374 drivers/net/fjes/fjes_hw.c union REG_CS cs; cs 385 drivers/net/fjes/fjes_hw.c cs.reg = rd32(XSCT_CS); cs 387 drivers/net/fjes/fjes_hw.c while ((cs.bits.complete != 1) && timeout > 0) { cs 389 drivers/net/fjes/fjes_hw.c cs.reg = rd32(XSCT_CS); cs 393 drivers/net/fjes/fjes_hw.c if (cs.bits.complete == 1) cs 412 drivers/net/fjes/fjes_hw.c trace_fjes_hw_issue_request_command(&cr, &cs, timeout, ret); cs 19 drivers/net/fjes/fjes_trace.h TP_PROTO(union REG_CR *cr, union REG_CS *cs, int timeout, cs 21 drivers/net/fjes/fjes_trace.h TP_ARGS(cr, cs, timeout, ret), cs 38 drivers/net/fjes/fjes_trace.h __entry->cs_req = cs->bits.req_code; cs 39 drivers/net/fjes/fjes_trace.h __entry->cs_busy = cs->bits.busy; cs 40 drivers/net/fjes/fjes_trace.h __entry->cs_complete = cs->bits.complete; cs 1060 drivers/net/hamradio/baycom_epp.c hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT); cs 1061 drivers/net/hamradio/baycom_epp.c hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT); cs 1062 drivers/net/hamradio/baycom_epp.c hi.data.cs.ptt_keyed = bc->ptt_keyed; cs 1063 drivers/net/hamradio/baycom_epp.c hi.data.cs.tx_packets = dev->stats.tx_packets; cs 1064 drivers/net/hamradio/baycom_epp.c hi.data.cs.tx_errors = dev->stats.tx_errors; cs 1065 drivers/net/hamradio/baycom_epp.c hi.data.cs.rx_packets = dev->stats.rx_packets; cs 1066 drivers/net/hamradio/baycom_epp.c hi.data.cs.rx_errors = dev->stats.rx_errors; cs 547 drivers/net/hamradio/hdlcdrv.c 
bi.data.cs.ptt = hdlcdrv_ptt(s); cs 548 drivers/net/hamradio/hdlcdrv.c bi.data.cs.dcd = s->hdlcrx.dcd; cs 549 drivers/net/hamradio/hdlcdrv.c bi.data.cs.ptt_keyed = s->ptt_keyed; cs 550 drivers/net/hamradio/hdlcdrv.c bi.data.cs.tx_packets = dev->stats.tx_packets; cs 551 drivers/net/hamradio/hdlcdrv.c bi.data.cs.tx_errors = dev->stats.tx_errors; cs 552 drivers/net/hamradio/hdlcdrv.c bi.data.cs.rx_packets = dev->stats.rx_packets; cs 553 drivers/net/hamradio/hdlcdrv.c bi.data.cs.rx_errors = dev->stats.rx_errors; cs 232 drivers/net/slip/slhc.c struct cstate *cs = lcs->next; cs 297 drivers/net/slip/slhc.c if( ip->saddr == cs->cs_ip.saddr cs 298 drivers/net/slip/slhc.c && ip->daddr == cs->cs_ip.daddr cs 299 drivers/net/slip/slhc.c && th->source == cs->cs_tcp.source cs 300 drivers/net/slip/slhc.c && th->dest == cs->cs_tcp.dest) cs 304 drivers/net/slip/slhc.c if ( cs == ocs ) cs 306 drivers/net/slip/slhc.c lcs = cs; cs 307 drivers/net/slip/slhc.c cs = cs->next; cs 329 drivers/net/slip/slhc.c } else if (cs == ocs) { cs 334 drivers/net/slip/slhc.c lcs->next = cs->next; cs 335 drivers/net/slip/slhc.c cs->next = ocs->next; cs 336 drivers/net/slip/slhc.c ocs->next = cs; cs 351 drivers/net/slip/slhc.c oth = &cs->cs_tcp; cs 353 drivers/net/slip/slhc.c if(ip->version != cs->cs_ip.version || ip->ihl != cs->cs_ip.ihl cs 354 drivers/net/slip/slhc.c || ip->tos != cs->cs_ip.tos cs 355 drivers/net/slip/slhc.c || (ip->frag_off & htons(0x4000)) != (cs->cs_ip.frag_off & htons(0x4000)) cs 356 drivers/net/slip/slhc.c || ip->ttl != cs->cs_ip.ttl cs 357 drivers/net/slip/slhc.c || th->doff != cs->cs_tcp.doff cs 358 drivers/net/slip/slhc.c || (ip->ihl > 5 && memcmp(ip+1,cs->cs_ipopt,((ip->ihl)-5)*4) != 0) cs 359 drivers/net/slip/slhc.c || (th->doff > 5 && memcmp(th+1,cs->cs_tcpopt,((th->doff)-5)*4) != 0)){ cs 405 drivers/net/slip/slhc.c if(ip->tot_len != cs->cs_ip.tot_len && cs 406 drivers/net/slip/slhc.c ntohs(cs->cs_ip.tot_len) == hlen) cs 417 drivers/net/slip/slhc.c deltaS == ntohs(cs->cs_ip.tot_len) - hlen){ cs 424 drivers/net/slip/slhc.c if(deltaS == ntohs(cs->cs_ip.tot_len) - hlen){ cs 431 drivers/net/slip/slhc.c deltaS = ntohs(ip->id) - ntohs(cs->cs_ip.id); cs 442 drivers/net/slip/slhc.c memcpy(&cs->cs_ip,ip,20); cs 443 drivers/net/slip/slhc.c memcpy(&cs->cs_tcp,th,20); cs 451 drivers/net/slip/slhc.c if(compress_cid == 0 || comp->xmit_current != cs->cs_this){ cs 455 drivers/net/slip/slhc.c *cp++ = cs->cs_this; cs 456 drivers/net/slip/slhc.c comp->xmit_current = cs->cs_this; cs 476 drivers/net/slip/slhc.c memcpy(&cs->cs_ip,ip,20); cs 477 drivers/net/slip/slhc.c memcpy(&cs->cs_tcp,th,20); cs 479 drivers/net/slip/slhc.c memcpy(cs->cs_ipopt, ip+1, ((ip->ihl) - 5) * 4); cs 481 drivers/net/slip/slhc.c memcpy(cs->cs_tcpopt, th+1, ((th->doff) - 5) * 4); cs 482 drivers/net/slip/slhc.c comp->xmit_current = cs->cs_this; cs 486 drivers/net/slip/slhc.c ocp[9] = cs->cs_this; cs 499 drivers/net/slip/slhc.c struct cstate *cs; cs 533 drivers/net/slip/slhc.c cs = &comp->rstate[comp->recv_current]; cs 534 drivers/net/slip/slhc.c thp = &cs->cs_tcp; cs 535 drivers/net/slip/slhc.c ip = &cs->cs_ip; cs 621 drivers/net/slip/slhc.c memcpy(cp, cs->cs_ipopt, (ip->ihl - 5) * 4); cs 632 drivers/net/slip/slhc.c memcpy(cp, cs->cs_tcpopt, ((thp->doff) - 5) * 4); cs 646 drivers/net/slip/slhc.c struct cstate *cs; cs 677 drivers/net/slip/slhc.c cs = &comp->rstate[comp->recv_current = index]; cs 679 drivers/net/slip/slhc.c memcpy(&cs->cs_ip,icp,20); cs 680 drivers/net/slip/slhc.c memcpy(&cs->cs_tcp,icp + ihl*4,20); cs 682 drivers/net/slip/slhc.c 
memcpy(cs->cs_ipopt, icp + sizeof(struct iphdr), (ihl - 5) * 4); cs 683 drivers/net/slip/slhc.c if (cs->cs_tcp.doff > 5) cs 684 drivers/net/slip/slhc.c memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4); cs 685 drivers/net/slip/slhc.c cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2; cs 686 drivers/net/slip/slhc.c cs->initialized = true; cs 519 drivers/net/wimax/i2400m/netdev.c enum i2400m_cs cs) cs 525 drivers/net/wimax/i2400m/netdev.c i2400m, skb, skb->len, cs); cs 526 drivers/net/wimax/i2400m/netdev.c switch(cs) { cs 539 drivers/net/wimax/i2400m/netdev.c dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs); cs 549 drivers/net/wimax/i2400m/netdev.c i2400m, skb, skb->len, cs); cs 469 drivers/net/wimax/i2400m/rx.c enum i2400m_cs cs; /* packet type for the skb */ cs 766 drivers/net/wimax/i2400m/rx.c i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs); cs 801 drivers/net/wimax/i2400m/rx.c i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs); cs 907 drivers/net/wimax/i2400m/rx.c i2400m_net_erx(i2400m, skb, roq_data->cs); cs 987 drivers/net/wimax/i2400m/rx.c enum i2400m_cs cs; cs 1025 drivers/net/wimax/i2400m/rx.c cs = hdr->cs; cs 1043 drivers/net/wimax/i2400m/rx.c roq_data->cs = cs; cs 1073 drivers/net/wimax/i2400m/rx.c i2400m_net_erx(i2400m, skb, cs); cs 1549 drivers/net/wireless/ath/wil6210/cfg80211.c struct wil_sta_info *cs, cs 1555 drivers/net/wireless/ath/wil6210/cfg80211.c if (!cs) cs 1562 drivers/net/wireless/ath/wil6210/cfg80211.c cc = &cs->tid_crypto_rx[tid].key_id[key_index]; cs 1572 drivers/net/wireless/ath/wil6210/cfg80211.c cc = &cs->group_crypto_rx.key_id[key_index]; cs 1585 drivers/net/wireless/ath/wil6210/cfg80211.c struct wil_sta_info *cs) cs 1590 drivers/net/wireless/ath/wil6210/cfg80211.c if (!cs) cs 1596 drivers/net/wireless/ath/wil6210/cfg80211.c cc = &cs->tid_crypto_rx[tid].key_id[key_index]; cs 1601 drivers/net/wireless/ath/wil6210/cfg80211.c cc = &cs->group_crypto_rx.key_id[key_index]; cs 1620 drivers/net/wireless/ath/wil6210/cfg80211.c struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, vif->mid, cs 1633 drivers/net/wireless/ath/wil6210/cfg80211.c if (IS_ERR(cs)) { cs 1645 drivers/net/wireless/ath/wil6210/cfg80211.c if (!IS_ERR(cs)) cs 1646 drivers/net/wireless/ath/wil6210/cfg80211.c wil_del_rx_key(key_index, key_usage, cs); cs 1669 drivers/net/wireless/ath/wil6210/cfg80211.c if (!rc && !IS_ERR(cs)) { cs 1680 drivers/net/wireless/ath/wil6210/cfg80211.c wil_set_crypto_rx(key_index, key_usage, cs, params); cs 1695 drivers/net/wireless/ath/wil6210/cfg80211.c struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, vif->mid, cs 1702 drivers/net/wireless/ath/wil6210/cfg80211.c if (IS_ERR(cs)) cs 1706 drivers/net/wireless/ath/wil6210/cfg80211.c if (!IS_ERR_OR_NULL(cs)) cs 1707 drivers/net/wireless/ath/wil6210/cfg80211.c wil_del_rx_key(key_index, key_usage, cs); cs 658 drivers/net/wireless/ath/wil6210/txrx.c int reverse_memcmp(const void *cs, const void *ct, size_t count) cs 663 drivers/net/wireless/ath/wil6210/txrx.c for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0; cs 1393 drivers/net/wireless/ath/wil6210/wil6210.h struct wil_sta_info *cs, cs 1436 drivers/net/wireless/ath/wil6210/wil6210.h int reverse_memcmp(const void *cs, const void *ct, size_t count); cs 194 drivers/net/wireless/intel/iwlwifi/fw/img.h struct iwl_fw_cipher_scheme cs[]; cs 286 drivers/net/wireless/intel/iwlwifi/fw/img.h struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS]; cs 384 drivers/net/wireless/intel/iwlwifi/iwl-drv.c len < sizeof(l->size) + l->size * 
sizeof(l->cs[0])) cs 388 drivers/net/wireless/intel/iwlwifi/iwl-drv.c fwcs = &l->cs[j]; cs 394 drivers/net/wireless/intel/iwlwifi/iwl-drv.c fw->cs[j++] = *fwcs; cs 486 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c if (mvm->fw->cs[0].cipher) { cs 487 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0]; cs 488 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c struct ieee80211_cipher_scheme *cs = &mvm->cs[0]; cs 492 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c cs->cipher = le32_to_cpu(fwcs->cipher); cs 493 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c cs->iftype = BIT(NL80211_IFTYPE_STATION); cs 494 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c cs->hdr_len = fwcs->hdr_len; cs 495 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c cs->pn_len = fwcs->pn_len; cs 496 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c cs->pn_off = fwcs->pn_off; cs 497 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c cs->key_idx_off = fwcs->key_idx_off; cs 498 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c cs->key_idx_mask = fwcs->key_idx_mask; cs 499 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c cs->key_idx_shift = fwcs->key_idx_shift; cs 500 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c cs->mic_len = fwcs->mic_len; cs 502 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c mvm->hw->cipher_schemes = mvm->cs; cs 503 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher; cs 1115 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; cs 813 drivers/net/wireless/marvell/mwifiex/sdio.c u8 cs; cs 816 drivers/net/wireless/marvell/mwifiex/sdio.c if (mwifiex_read_reg(adapter, card->reg->poll_reg, &cs)) cs 818 drivers/net/wireless/marvell/mwifiex/sdio.c else if ((cs & bits) == bits) cs 3139 drivers/net/wireless/realtek/rtw88/rtw8822c.c u32 pd, cs; cs 3147 drivers/net/wireless/realtek/rtw88/rtw8822c.c cs = rtw_read32_mask(rtwdev, cs 3151 drivers/net/wireless/realtek/rtw88/rtw8822c.c cs += cs_diff; cs 3154 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (cs == RTW_CCK_CS_ERR1 || cs == RTW_CCK_CS_ERR2) cs 3155 drivers/net/wireless/realtek/rtw88/rtw8822c.c cs++; cs 3156 drivers/net/wireless/realtek/rtw88/rtw8822c.c else if (cs > RTW_CCK_CS_MAX) cs 3157 drivers/net/wireless/realtek/rtw88/rtw8822c.c cs = RTW_CCK_CS_MAX; cs 3165 drivers/net/wireless/realtek/rtw88/rtw8822c.c cs); cs 642 drivers/net/wireless/zydas/zd1211rw/zd_mac.c static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, cs 652 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->control = 0; cs 656 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->control |= ZD_CS_NEED_RANDOM_BACKOFF; cs 660 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->control |= ZD_CS_NO_ACK; cs 664 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->control |= ZD_CS_PS_POLL_FRAME; cs 667 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->control |= ZD_CS_RTS; cs 670 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->control |= ZD_CS_SELF_CTS; cs 858 drivers/net/wireless/zydas/zd1211rw/zd_mac.c struct zd_ctrlset *cs = skb_push(skb, sizeof(struct zd_ctrlset)); cs 873 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->modulation = txrate->hw_value; cs 875 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->modulation = txrate->hw_value_short; cs 877 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->tx_length = cpu_to_le16(frag_len); cs 879 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs_set_control(mac, cs, hdr, info); cs 886 
drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->packet_length = cpu_to_le16(zd_chip_is_zd1211b(&mac->chip) ? cs 902 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->service = 0; cs 903 drivers/net/wireless/zydas/zd1211rw/zd_mac.c r = zd_calc_tx_length_us(&cs->service, ZD_RATE(cs->modulation), cs 904 drivers/net/wireless/zydas/zd1211rw/zd_mac.c le16_to_cpu(cs->tx_length)); cs 907 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->current_length = cpu_to_le16(r); cs 908 drivers/net/wireless/zydas/zd1211rw/zd_mac.c cs->next_frame_length = 0; cs 183 drivers/pci/controller/pci-mvebu.c const struct mbus_dram_window *cs = dram->cs + i; cs 185 drivers/pci/controller/pci-mvebu.c mvebu_writel(port, cs->base & 0xffff0000, cs 189 drivers/pci/controller/pci-mvebu.c ((cs->size - 1) & 0xffff0000) | cs 190 drivers/pci/controller/pci-mvebu.c (cs->mbus_attr << 8) | cs 194 drivers/pci/controller/pci-mvebu.c size += cs->size; cs 202 drivers/pci/controller/pci-mvebu.c mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1)); cs 357 drivers/pci/controller/pci-tegra.c struct resource cs; cs 915 drivers/pci/controller/pci-tegra.c size = resource_size(&pcie->cs); cs 916 drivers/pci/controller/pci-tegra.c afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START); cs 1539 drivers/pci/controller/pci-tegra.c pcie->cs = *res; cs 1542 drivers/pci/controller/pci-tegra.c pcie->cs.end = pcie->cs.start + SZ_4K - 1; cs 1544 drivers/pci/controller/pci-tegra.c pcie->cfg = devm_ioremap_resource(dev, &pcie->cs); cs 661 drivers/pinctrl/pinctrl-mcp23s08.c unsigned int base, int cs) cs 1370 drivers/pinctrl/sh-pfc/pfc-r8a7778.c #define HSPI_PFC_DAT(name, clk, cs, rx, tx) SH_PFC_MUX4(name, clk, cs, rx, tx) cs 1181 drivers/power/supply/ab8500_fg.c struct ab8500_fg_cap_scaling *cs = &di->bat_cap.cap_scale; cs 1184 drivers/power/supply/ab8500_fg.c if (!cs->enable) cs 1192 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[0] = 100; cs 1193 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[1] = cs 1196 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[0], cs->cap_to_scale[1]); cs 1200 drivers/power/supply/ab8500_fg.c if ((cs->cap_to_scale[0] != cs->cap_to_scale[1]) cs 1201 drivers/power/supply/ab8500_fg.c && (cs->cap_to_scale[1] > 0)) cs 1204 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[0], cs 1205 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[1])); cs 1208 drivers/power/supply/ab8500_fg.c if (capacity < cs->disable_cap_level) { cs 1209 drivers/power/supply/ab8500_fg.c cs->disable_cap_level = capacity; cs 1211 drivers/power/supply/ab8500_fg.c cs->disable_cap_level); cs 1214 drivers/power/supply/ab8500_fg.c cs->disable_cap_level) { cs 1216 drivers/power/supply/ab8500_fg.c cs->enable = false; cs 1221 drivers/power/supply/ab8500_fg.c cs->disable_cap_level); cs 1222 drivers/power/supply/ab8500_fg.c capacity = cs->disable_cap_level; cs 1239 drivers/power/supply/ab8500_fg.c struct ab8500_fg_cap_scaling *cs = &di->bat_cap.cap_scale; cs 1241 drivers/power/supply/ab8500_fg.c if (!cs->enable) cs 1249 drivers/power/supply/ab8500_fg.c if (cs->scaled_cap != 100) { cs 1250 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[0] = cs->scaled_cap; cs 1251 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[1] = di->bat_cap.prev_percent; cs 1253 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[0] = 100; cs 1254 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[1] = cs 1260 drivers/power/supply/ab8500_fg.c cs->cap_to_scale[0], cs->cap_to_scale[1]); cs 61 drivers/ps3/ps3av.c u32 cs; cs 506 drivers/ps3/ps3av.c av_video_cs = video_mode->cs; cs 685 
drivers/ps3/ps3av.c pr_debug("color space rgb: %02x\n", info->cs.rgb); cs 686 drivers/ps3/ps3av.c pr_debug("color space yuv444: %02x\n", info->cs.yuv444); cs 687 drivers/ps3/ps3av.c pr_debug("color space yuv422: %02x\n", info->cs.yuv422); cs 28 drivers/ps3/ps3av_cmd.c int cs; cs 33 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_RGB_8, cs 37 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_RGB_10, cs 41 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_RGB_12, cs 45 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_YUV444_8, cs 49 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_YUV444_10, cs 53 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_YUV444_12, cs 57 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_YUV422_8, cs 61 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_YUV422_10, cs 65 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_YUV422_12, cs 69 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_XVYCC_8, cs 73 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_XVYCC_10, cs 77 drivers/ps3/ps3av_cmd.c .cs = PS3AV_CMD_VIDEO_CS_XVYCC_12, cs 83 drivers/ps3/ps3av_cmd.c static u32 ps3av_cs_video2av(int cs) cs 88 drivers/ps3/ps3av_cmd.c if (ps3av_cs_video2av_table[i].cs == cs) cs 94 drivers/ps3/ps3av_cmd.c static u32 ps3av_cs_video2av_bitlen(int cs) cs 99 drivers/ps3/ps3av_cmd.c if (ps3av_cs_video2av_table[i].cs == cs) cs 76 drivers/ptp/ptp_kvm.c system_counter->cs = &kvm_clock; cs 220 drivers/s390/char/raw3270.h struct string *cs, *tmp; cs 224 drivers/s390/char/raw3270.h list_for_each_entry(cs, free_list, list) { cs 225 drivers/s390/char/raw3270.h if (cs->size < size) cs 227 drivers/s390/char/raw3270.h if (cs->size > size + sizeof(struct string)) { cs 228 drivers/s390/char/raw3270.h char *endaddr = (char *) (cs + 1) + cs->size; cs 231 drivers/s390/char/raw3270.h cs->size -= size + sizeof(struct string); cs 232 drivers/s390/char/raw3270.h cs = tmp; cs 234 drivers/s390/char/raw3270.h list_del(&cs->list); cs 235 drivers/s390/char/raw3270.h cs->len = len; cs 236 drivers/s390/char/raw3270.h INIT_LIST_HEAD(&cs->list); cs 237 drivers/s390/char/raw3270.h INIT_LIST_HEAD(&cs->update); cs 238 drivers/s390/char/raw3270.h return cs; cs 244 drivers/s390/char/raw3270.h free_string(struct list_head *free_list, struct string *cs) cs 252 drivers/s390/char/raw3270.h if (list_entry(p, struct string, list) > cs) cs 259 drivers/s390/char/raw3270.h if ((char *) (cs + 1) + cs->size == (char *) tmp) { cs 261 drivers/s390/char/raw3270.h cs->size += tmp->size + sizeof(struct string); cs 267 drivers/s390/char/raw3270.h if ((char *) (tmp + 1) + tmp->size == (char *) cs) { cs 268 drivers/s390/char/raw3270.h tmp->size += cs->size + sizeof(struct string); cs 272 drivers/s390/char/raw3270.h __list_add(&cs->list, left, left->next); cs 273 drivers/s390/char/raw3270.h return cs->size; cs 279 drivers/s390/char/raw3270.h struct string *cs; cs 281 drivers/s390/char/raw3270.h cs = (struct string *) mem; cs 282 drivers/s390/char/raw3270.h cs->size = size - sizeof(struct string); cs 283 drivers/s390/char/raw3270.h free_string(free_list, cs); cs 1156 drivers/scsi/aic7xxx/aic79xx.h struct cs *critical_sections; cs 3465 drivers/scsi/aic7xxx/aic79xx_core.c struct cs *cs; cs 3472 drivers/scsi/aic7xxx/aic79xx_core.c cs = ahd->critical_sections; cs 3473 drivers/scsi/aic7xxx/aic79xx_core.c for (i = 0; i < ahd->num_critical_sections; i++, cs++) { cs 3475 drivers/scsi/aic7xxx/aic79xx_core.c if (cs->begin < seqaddr && cs->end >= seqaddr) cs 9302 drivers/scsi/aic7xxx/aic79xx_core.c struct cs cs_table[NUM_CRITICAL_SECTIONS]; cs 9444 
drivers/scsi/aic7xxx/aic79xx_core.c cs_count *= sizeof(struct cs); cs 999 drivers/scsi/aic7xxx/aic7xxx.h struct cs *critical_sections; cs 1996 drivers/scsi/aic7xxx/aic7xxx_core.c struct cs *cs; cs 2010 drivers/scsi/aic7xxx/aic7xxx_core.c cs = ahc->critical_sections; cs 2011 drivers/scsi/aic7xxx/aic7xxx_core.c for (i = 0; i < ahc->num_critical_sections; i++, cs++) { cs 2013 drivers/scsi/aic7xxx/aic7xxx_core.c if (cs->begin < seqaddr && cs->end >= seqaddr) cs 6818 drivers/scsi/aic7xxx/aic7xxx_core.c struct cs cs_table[NUM_CRITICAL_SECTIONS]; cs 6909 drivers/scsi/aic7xxx/aic7xxx_core.c cs_count *= sizeof(struct cs); cs 353 drivers/scsi/aic7xxx/aicasm/aicasm.c critical_section_t *cs; cs 443 drivers/scsi/aic7xxx/aicasm/aicasm.c for (cs = TAILQ_FIRST(&cs_tailq); cs 444 drivers/scsi/aic7xxx/aicasm/aicasm.c cs != NULL; cs 445 drivers/scsi/aic7xxx/aicasm/aicasm.c cs = TAILQ_NEXT(cs, links)) { cs 447 drivers/scsi/aic7xxx/aicasm/aicasm.c cs == TAILQ_FIRST(&cs_tailq) ? "" : ",\n", cs 448 drivers/scsi/aic7xxx/aicasm/aicasm.c cs->begin_addr, cs->end_addr); cs 1001 drivers/scsi/aic7xxx/aicasm/aicasm_gram.y critical_section_t *cs; cs 1008 drivers/scsi/aic7xxx/aicasm/aicasm_gram.y cs = cs_alloc(); cs 1009 drivers/scsi/aic7xxx/aicasm/aicasm_gram.y cs->begin_addr = instruction_ptr; cs 1017 drivers/scsi/aic7xxx/aicasm/aicasm_gram.y critical_section_t *cs; cs 1023 drivers/scsi/aic7xxx/aicasm/aicasm_gram.y cs = TAILQ_LAST(&cs_tailq, cs_tailq); cs 1024 drivers/scsi/aic7xxx/aicasm/aicasm_gram.y cs->end_addr = instruction_ptr; cs 295 drivers/scsi/arm/acornscsi.c acornscsi_csdelay(unsigned int cs) cs 299 drivers/scsi/arm/acornscsi.c target_jiffies = jiffies + 1 + cs * HZ / 100; cs 725 drivers/scsi/myrb.h void (*qcmd)(struct myrb_hba *cs, struct myrb_cmdblk *cmd_blk); cs 104 drivers/scsi/myrs.c static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk) cs 106 drivers/scsi/myrs.c void __iomem *base = cs->io_base; cs 108 drivers/scsi/myrs.c union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox; cs 110 drivers/scsi/myrs.c cs->write_cmd_mbox(next_mbox, mbox); cs 112 drivers/scsi/myrs.c if (cs->prev_cmd_mbox1->words[0] == 0 || cs 113 drivers/scsi/myrs.c cs->prev_cmd_mbox2->words[0] == 0) cs 114 drivers/scsi/myrs.c cs->get_cmd_mbox(base); cs 116 drivers/scsi/myrs.c cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1; cs 117 drivers/scsi/myrs.c cs->prev_cmd_mbox1 = next_mbox; cs 119 drivers/scsi/myrs.c if (++next_mbox > cs->last_cmd_mbox) cs 120 drivers/scsi/myrs.c next_mbox = cs->first_cmd_mbox; cs 122 drivers/scsi/myrs.c cs->next_cmd_mbox = next_mbox; cs 128 drivers/scsi/myrs.c static void myrs_exec_cmd(struct myrs_hba *cs, cs 135 drivers/scsi/myrs.c spin_lock_irqsave(&cs->queue_lock, flags); cs 136 drivers/scsi/myrs.c myrs_qcmd(cs, cmd_blk); cs 137 drivers/scsi/myrs.c spin_unlock_irqrestore(&cs->queue_lock, flags); cs 146 drivers/scsi/myrs.c static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num, cs 150 drivers/scsi/myrs.c shost_printk(KERN_INFO, cs->host, cs 159 drivers/scsi/myrs.c static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs) cs 161 drivers/scsi/myrs.c struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; cs 168 drivers/scsi/myrs.c ldev_present = cs->ctlr_info->ldev_present; cs 169 drivers/scsi/myrs.c ldev_critical = cs->ctlr_info->ldev_critical; cs 170 drivers/scsi/myrs.c ldev_offline = cs->ctlr_info->ldev_offline; cs 172 drivers/scsi/myrs.c ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info, cs 175 drivers/scsi/myrs.c if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr)) cs 178 
drivers/scsi/myrs.c mutex_lock(&cs->dcmd_mutex); cs 190 drivers/scsi/myrs.c dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n"); cs 191 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk); cs 193 drivers/scsi/myrs.c mutex_unlock(&cs->dcmd_mutex); cs 194 drivers/scsi/myrs.c dma_unmap_single(&cs->pdev->dev, ctlr_info_addr, cs 197 drivers/scsi/myrs.c if (cs->ctlr_info->bg_init_active + cs 198 drivers/scsi/myrs.c cs->ctlr_info->ldev_init_active + cs 199 drivers/scsi/myrs.c cs->ctlr_info->pdev_init_active + cs 200 drivers/scsi/myrs.c cs->ctlr_info->cc_active + cs 201 drivers/scsi/myrs.c cs->ctlr_info->rbld_active + cs 202 drivers/scsi/myrs.c cs->ctlr_info->exp_active != 0) cs 203 drivers/scsi/myrs.c cs->needs_update = true; cs 204 drivers/scsi/myrs.c if (cs->ctlr_info->ldev_present != ldev_present || cs 205 drivers/scsi/myrs.c cs->ctlr_info->ldev_critical != ldev_critical || cs 206 drivers/scsi/myrs.c cs->ctlr_info->ldev_offline != ldev_offline) cs 207 drivers/scsi/myrs.c shost_printk(KERN_INFO, cs->host, cs 209 drivers/scsi/myrs.c cs->ctlr_info->ldev_critical, cs 210 drivers/scsi/myrs.c cs->ctlr_info->ldev_offline, cs 211 drivers/scsi/myrs.c cs->ctlr_info->ldev_present); cs 220 drivers/scsi/myrs.c static unsigned char myrs_get_ldev_info(struct myrs_hba *cs, cs 223 drivers/scsi/myrs.c struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; cs 231 drivers/scsi/myrs.c ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info, cs 234 drivers/scsi/myrs.c if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr)) cs 237 drivers/scsi/myrs.c mutex_lock(&cs->dcmd_mutex); cs 249 drivers/scsi/myrs.c dev_dbg(&cs->host->shost_gendev, cs 251 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk); cs 253 drivers/scsi/myrs.c mutex_unlock(&cs->dcmd_mutex); cs 254 drivers/scsi/myrs.c dma_unmap_single(&cs->pdev->dev, ldev_info_addr, cs 266 drivers/scsi/myrs.c shost_printk(KERN_INFO, cs->host, cs 273 drivers/scsi/myrs.c shost_printk(KERN_INFO, cs->host, cs 279 drivers/scsi/myrs.c myrs_report_progress(cs, ldev_num, cs 283 drivers/scsi/myrs.c myrs_report_progress(cs, ldev_num, cs 287 drivers/scsi/myrs.c myrs_report_progress(cs, ldev_num, cs 291 drivers/scsi/myrs.c myrs_report_progress(cs, ldev_num, cs 295 drivers/scsi/myrs.c shost_printk(KERN_INFO, cs->host, cs 307 drivers/scsi/myrs.c static unsigned char myrs_get_pdev_info(struct myrs_hba *cs, cs 311 drivers/scsi/myrs.c struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; cs 317 drivers/scsi/myrs.c pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info, cs 320 drivers/scsi/myrs.c if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr)) cs 323 drivers/scsi/myrs.c mutex_lock(&cs->dcmd_mutex); cs 337 drivers/scsi/myrs.c dev_dbg(&cs->host->shost_gendev, cs 340 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk); cs 342 drivers/scsi/myrs.c mutex_unlock(&cs->dcmd_mutex); cs 343 drivers/scsi/myrs.c dma_unmap_single(&cs->pdev->dev, pdev_info_addr, cs 351 drivers/scsi/myrs.c static unsigned char myrs_dev_op(struct myrs_hba *cs, cs 354 drivers/scsi/myrs.c struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; cs 358 drivers/scsi/myrs.c mutex_lock(&cs->dcmd_mutex); cs 366 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk); cs 368 drivers/scsi/myrs.c mutex_unlock(&cs->dcmd_mutex); cs 376 drivers/scsi/myrs.c static unsigned char myrs_translate_pdev(struct myrs_hba *cs, cs 380 drivers/scsi/myrs.c struct pci_dev *pdev = cs->pdev; cs 394 drivers/scsi/myrs.c mutex_lock(&cs->dcmd_mutex); cs 395 drivers/scsi/myrs.c cmd_blk = &cs->dcmd_blk; cs 409 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk); cs 411 drivers/scsi/myrs.c 
mutex_unlock(&cs->dcmd_mutex); cs 420 drivers/scsi/myrs.c static unsigned char myrs_get_event(struct myrs_hba *cs, cs 423 drivers/scsi/myrs.c struct pci_dev *pdev = cs->pdev; cs 425 drivers/scsi/myrs.c struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk; cs 444 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk); cs 455 drivers/scsi/myrs.c static unsigned char myrs_get_fwstatus(struct myrs_hba *cs) cs 457 drivers/scsi/myrs.c struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk; cs 470 drivers/scsi/myrs.c sgl->sge[0].sge_addr = cs->fwstat_addr; cs 472 drivers/scsi/myrs.c dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n"); cs 473 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk); cs 482 drivers/scsi/myrs.c static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, cs 485 drivers/scsi/myrs.c void __iomem *base = cs->io_base; cs 486 drivers/scsi/myrs.c struct pci_dev *pdev = cs->pdev; cs 506 drivers/scsi/myrs.c cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox); cs 507 drivers/scsi/myrs.c cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size, cs 508 drivers/scsi/myrs.c &cs->cmd_mbox_addr, GFP_KERNEL); cs 509 drivers/scsi/myrs.c if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) { cs 513 drivers/scsi/myrs.c cs->first_cmd_mbox = cmd_mbox; cs 515 drivers/scsi/myrs.c cs->last_cmd_mbox = cmd_mbox; cs 516 drivers/scsi/myrs.c cs->next_cmd_mbox = cs->first_cmd_mbox; cs 517 drivers/scsi/myrs.c cs->prev_cmd_mbox1 = cs->last_cmd_mbox; cs 518 drivers/scsi/myrs.c cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1; cs 521 drivers/scsi/myrs.c cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox); cs 522 drivers/scsi/myrs.c stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size, cs 523 drivers/scsi/myrs.c &cs->stat_mbox_addr, GFP_KERNEL); cs 524 drivers/scsi/myrs.c if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) { cs 529 drivers/scsi/myrs.c cs->first_stat_mbox = stat_mbox; cs 531 drivers/scsi/myrs.c cs->last_stat_mbox = stat_mbox; cs 532 drivers/scsi/myrs.c cs->next_stat_mbox = cs->first_stat_mbox; cs 534 drivers/scsi/myrs.c cs->fwstat_buf = dma_alloc_coherent(&pdev->dev, cs 536 drivers/scsi/myrs.c &cs->fwstat_addr, GFP_KERNEL); cs 537 drivers/scsi/myrs.c if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) { cs 539 drivers/scsi/myrs.c cs->fwstat_buf = NULL; cs 542 drivers/scsi/myrs.c cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), cs 544 drivers/scsi/myrs.c if (!cs->ctlr_info) cs 547 drivers/scsi/myrs.c cs->event_buf = kzalloc(sizeof(struct myrs_event), cs 549 drivers/scsi/myrs.c if (!cs->event_buf) cs 566 drivers/scsi/myrs.c mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr; cs 567 drivers/scsi/myrs.c mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr; cs 568 drivers/scsi/myrs.c mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr; cs 583 drivers/scsi/myrs.c static int myrs_get_config(struct myrs_hba *cs) cs 585 drivers/scsi/myrs.c struct myrs_ctlr_info *info = cs->ctlr_info; cs 586 drivers/scsi/myrs.c struct Scsi_Host *shost = cs->host; cs 593 drivers/scsi/myrs.c mutex_lock(&cs->cinfo_mutex); cs 594 drivers/scsi/myrs.c status = myrs_get_ctlr_info(cs); cs 595 drivers/scsi/myrs.c mutex_unlock(&cs->cinfo_mutex); cs 611 drivers/scsi/myrs.c strcpy(cs->model_name, "DAC960 "); cs 612 drivers/scsi/myrs.c strcat(cs->model_name, model); cs 814 drivers/scsi/myrs.c static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev) cs 819 drivers/scsi/myrs.c struct Scsi_Host *shost = cs->host; cs 859 drivers/scsi/myrs.c sdev->channel < cs->ctlr_info->physchan_present) { 
cs 886 drivers/scsi/myrs.c cs->needs_update = true;
cs 892 drivers/scsi/myrs.c cs->needs_update = true;
cs 916 drivers/scsi/myrs.c if (cs->disable_enc_msg)
cs 941 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 947 drivers/scsi/myrs.c if (sdev->channel >= cs->ctlr_info->physchan_present) {
cs 976 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 993 drivers/scsi/myrs.c if (sdev->channel < cs->ctlr_info->physchan_present) {
cs 1004 drivers/scsi/myrs.c status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
cs 1020 drivers/scsi/myrs.c mutex_lock(&cs->dcmd_mutex);
cs 1021 drivers/scsi/myrs.c cmd_blk = &cs->dcmd_blk;
cs 1031 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk);
cs 1033 drivers/scsi/myrs.c mutex_unlock(&cs->dcmd_mutex);
cs 1035 drivers/scsi/myrs.c if (sdev->channel < cs->ctlr_info->physchan_present) {
cs 1060 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1066 drivers/scsi/myrs.c if (sdev->channel >= cs->ctlr_info->physchan_present) {
cs 1086 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1091 drivers/scsi/myrs.c if (sdev->channel < cs->ctlr_info->physchan_present)
cs 1096 drivers/scsi/myrs.c status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
cs 1115 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1123 drivers/scsi/myrs.c if (sdev->channel < cs->ctlr_info->physchan_present)
cs 1135 drivers/scsi/myrs.c status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
cs 1154 drivers/scsi/myrs.c mutex_lock(&cs->dcmd_mutex);
cs 1155 drivers/scsi/myrs.c cmd_blk = &cs->dcmd_blk;
cs 1169 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk);
cs 1171 drivers/scsi/myrs.c mutex_unlock(&cs->dcmd_mutex);
cs 1191 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1196 drivers/scsi/myrs.c if (sdev->channel < cs->ctlr_info->physchan_present)
cs 1203 drivers/scsi/myrs.c status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
cs 1216 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1224 drivers/scsi/myrs.c if (sdev->channel < cs->ctlr_info->physchan_present)
cs 1236 drivers/scsi/myrs.c status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
cs 1256 drivers/scsi/myrs.c mutex_lock(&cs->dcmd_mutex);
cs 1257 drivers/scsi/myrs.c cmd_blk = &cs->dcmd_blk;
cs 1273 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk);
cs 1275 drivers/scsi/myrs.c mutex_unlock(&cs->dcmd_mutex);
cs 1303 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1306 drivers/scsi/myrs.c memcpy(serial, cs->ctlr_info->serial_number, 16);
cs 1316 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1318 drivers/scsi/myrs.c return snprintf(buf, 20, "%d\n", cs->host->host_no);
cs 1339 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1343 drivers/scsi/myrs.c struct myrs_ctlr_info *info = cs->ctlr_info;
cs 1391 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1393 drivers/scsi/myrs.c return snprintf(buf, 28, "%s\n", cs->model_name);
cs 1401 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1403 drivers/scsi/myrs.c return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
cs 1411 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1413 drivers/scsi/myrs.c return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
cs 1421 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1424 drivers/scsi/myrs.c cs->ctlr_info->fw_major_version,
cs 1425 drivers/scsi/myrs.c cs->ctlr_info->fw_minor_version,
cs 1426 drivers/scsi/myrs.c cs->ctlr_info->fw_turn_number);
cs 1434 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1439 drivers/scsi/myrs.c mutex_lock(&cs->dcmd_mutex);
cs 1440 drivers/scsi/myrs.c cmd_blk = &cs->dcmd_blk;
cs 1448 drivers/scsi/myrs.c myrs_exec_cmd(cs, cmd_blk);
cs 1450 drivers/scsi/myrs.c mutex_unlock(&cs->dcmd_mutex);
cs 1458 drivers/scsi/myrs.c cs->next_evseq = 0;
cs 1459 drivers/scsi/myrs.c cs->needs_update = true;
cs 1460 drivers/scsi/myrs.c queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
cs 1461 drivers/scsi/myrs.c flush_delayed_work(&cs->monitor_work);
cs 1472 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1475 drivers/scsi/myrs.c status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
cs 1491 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1493 drivers/scsi/myrs.c return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
cs 1500 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1510 drivers/scsi/myrs.c cs->disable_enc_msg = value;
cs 1535 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1537 drivers/scsi/myrs.c cs->reset(cs->io_base);
cs 1541 drivers/scsi/myrs.c static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
cs 1587 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(shost);
cs 1611 drivers/scsi/myrs.c if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
cs 1622 drivers/scsi/myrs.c myrs_mode_sense(cs, scmd, ldev_info);
cs 1632 drivers/scsi/myrs.c cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
cs 1640 drivers/scsi/myrs.c if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
cs 1676 drivers/scsi/myrs.c cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
cs 1679 drivers/scsi/myrs.c dma_pool_free(cs->sense_pool, cmd_blk->sense,
cs 1686 drivers/scsi/myrs.c if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
cs 1733 drivers/scsi/myrs.c hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
cs 1737 drivers/scsi/myrs.c dma_pool_free(cs->dcdb_pool,
cs 1743 drivers/scsi/myrs.c dma_pool_free(cs->sense_pool,
cs 1774 drivers/scsi/myrs.c spin_lock_irqsave(&cs->queue_lock, flags);
cs 1775 drivers/scsi/myrs.c myrs_qcmd(cs, cmd_blk);
cs 1776 drivers/scsi/myrs.c spin_unlock_irqrestore(&cs->queue_lock, flags);
cs 1781 drivers/scsi/myrs.c static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
cs 1786 drivers/scsi/myrs.c sdev->channel - cs->ctlr_info->physchan_present;
cs 1795 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1801 drivers/scsi/myrs.c if (sdev->channel >= cs->ctlr_info->physchan_present) {
cs 1808 drivers/scsi/myrs.c ldev_num = myrs_translate_ldev(cs, sdev);
cs 1814 drivers/scsi/myrs.c status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
cs 1876 drivers/scsi/myrs.c status = myrs_get_pdev_info(cs, sdev->channel,
cs 1891 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1897 drivers/scsi/myrs.c if (sdev->channel < cs->ctlr_info->physchan_present) {
cs 1941 drivers/scsi/myrs.c struct myrs_hba *cs;
cs 1949 drivers/scsi/myrs.c cs = shost_priv(shost);
cs 1950 drivers/scsi/myrs.c mutex_init(&cs->dcmd_mutex);
cs 1951 drivers/scsi/myrs.c mutex_init(&cs->cinfo_mutex);
cs 1952 drivers/scsi/myrs.c cs->host = shost;
cs 1954 drivers/scsi/myrs.c return cs;
cs 1969 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1971 drivers/scsi/myrs.c return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
cs 1982 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 1987 drivers/scsi/myrs.c if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
cs 1992 drivers/scsi/myrs.c status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
cs 2007 drivers/scsi/myrs.c struct myrs_hba *cs = shost_priv(sdev->host);
cs 2011 drivers/scsi/myrs.c if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
cs 2046 drivers/scsi/myrs.c void myrs_flush_cache(struct myrs_hba *cs)
cs 2048 drivers/scsi/myrs.c myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
cs 2051 drivers/scsi/myrs.c static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
cs 2069 drivers/scsi/myrs.c dma_pool_free(cs->sense_pool, cmd_blk->sense,
cs 2075 drivers/scsi/myrs.c dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
cs 2081 drivers/scsi/myrs.c dma_pool_free(cs->sg_pool, cmd_blk->sgl,
cs 2096 drivers/scsi/myrs.c static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
cs 2109 drivers/scsi/myrs.c struct myrs_hba *cs = container_of(work, struct myrs_hba,
cs 2111 drivers/scsi/myrs.c struct Scsi_Host *shost = cs->host;
cs 2112 drivers/scsi/myrs.c struct myrs_ctlr_info *info = cs->ctlr_info;
cs 2113 drivers/scsi/myrs.c unsigned int epoch = cs->fwstat_buf->epoch;
cs 2119 drivers/scsi/myrs.c status = myrs_get_fwstatus(cs);
cs 2121 drivers/scsi/myrs.c if (cs->needs_update) {
cs 2122 drivers/scsi/myrs.c cs->needs_update = false;
cs 2123 drivers/scsi/myrs.c mutex_lock(&cs->cinfo_mutex);
cs 2124 drivers/scsi/myrs.c status = myrs_get_ctlr_info(cs);
cs 2125 drivers/scsi/myrs.c mutex_unlock(&cs->cinfo_mutex);
cs 2127 drivers/scsi/myrs.c if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
cs 2128 drivers/scsi/myrs.c status = myrs_get_event(cs, cs->next_evseq,
cs 2129 drivers/scsi/myrs.c cs->event_buf);
cs 2131 drivers/scsi/myrs.c myrs_log_event(cs, cs->event_buf);
cs 2132 drivers/scsi/myrs.c cs->next_evseq++;
cs 2137 drivers/scsi/myrs.c if (time_after(jiffies, cs->secondary_monitor_time
cs 2139 drivers/scsi/myrs.c cs->secondary_monitor_time = jiffies;
cs 2159 drivers/scsi/myrs.c myrs_get_ldev_info(cs, ldev_num, ldev_info);
cs 2161 drivers/scsi/myrs.c cs->needs_update = true;
cs 2163 drivers/scsi/myrs.c if (epoch == cs->epoch &&
cs 2164 drivers/scsi/myrs.c cs->fwstat_buf->next_evseq == cs->next_evseq &&
cs 2165 drivers/scsi/myrs.c (cs->needs_update == false ||
cs 2166 drivers/scsi/myrs.c time_before(jiffies, cs->primary_monitor_time
cs 2172 drivers/scsi/myrs.c cs->primary_monitor_time = jiffies;
cs 2173 drivers/scsi/myrs.c queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
cs 2176 drivers/scsi/myrs.c static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
cs 2178 drivers/scsi/myrs.c struct Scsi_Host *shost = cs->host;
cs 2183 drivers/scsi/myrs.c cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
cs 2185 drivers/scsi/myrs.c if (cs->sg_pool == NULL) {
cs 2191 drivers/scsi/myrs.c cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
cs 2193 drivers/scsi/myrs.c if (cs->sense_pool == NULL) {
cs 2194 drivers/scsi/myrs.c dma_pool_destroy(cs->sg_pool);
cs 2195 drivers/scsi/myrs.c cs->sg_pool = NULL;
cs 2201 drivers/scsi/myrs.c cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
cs 2204 drivers/scsi/myrs.c if (!cs->dcdb_pool) {
cs 2205 drivers/scsi/myrs.c dma_pool_destroy(cs->sg_pool);
cs 2206 drivers/scsi/myrs.c cs->sg_pool = NULL;
cs 2207 drivers/scsi/myrs.c dma_pool_destroy(cs->sense_pool);
cs 2208 drivers/scsi/myrs.c cs->sense_pool = NULL;
cs 2214 drivers/scsi/myrs.c snprintf(cs->work_q_name, sizeof(cs->work_q_name),
cs 2216 drivers/scsi/myrs.c cs->work_q = create_singlethread_workqueue(cs->work_q_name);
cs 2217 drivers/scsi/myrs.c if (!cs->work_q) {
cs 2218 drivers/scsi/myrs.c dma_pool_destroy(cs->dcdb_pool);
cs 2219 drivers/scsi/myrs.c cs->dcdb_pool = NULL;
cs 2220 drivers/scsi/myrs.c dma_pool_destroy(cs->sg_pool);
cs 2221 drivers/scsi/myrs.c cs->sg_pool = NULL;
cs 2222 drivers/scsi/myrs.c dma_pool_destroy(cs->sense_pool);
cs 2223 drivers/scsi/myrs.c cs->sense_pool = NULL;
cs 2230 drivers/scsi/myrs.c INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
cs 2231 drivers/scsi/myrs.c queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
cs 2236 drivers/scsi/myrs.c static void myrs_destroy_mempools(struct myrs_hba *cs)
cs 2238 drivers/scsi/myrs.c cancel_delayed_work_sync(&cs->monitor_work);
cs 2239 drivers/scsi/myrs.c destroy_workqueue(cs->work_q);
cs 2241 drivers/scsi/myrs.c dma_pool_destroy(cs->sg_pool);
cs 2242 drivers/scsi/myrs.c dma_pool_destroy(cs->dcdb_pool);
cs 2243 drivers/scsi/myrs.c dma_pool_destroy(cs->sense_pool);
cs 2246 drivers/scsi/myrs.c static void myrs_unmap(struct myrs_hba *cs)
cs 2248 drivers/scsi/myrs.c kfree(cs->event_buf);
cs 2249 drivers/scsi/myrs.c kfree(cs->ctlr_info);
cs 2250 drivers/scsi/myrs.c if (cs->fwstat_buf) {
cs 2251 drivers/scsi/myrs.c dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
cs 2252 drivers/scsi/myrs.c cs->fwstat_buf, cs->fwstat_addr);
cs 2253 drivers/scsi/myrs.c cs->fwstat_buf = NULL;
cs 2255 drivers/scsi/myrs.c if (cs->first_stat_mbox) {
cs 2256 drivers/scsi/myrs.c dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
cs 2257 drivers/scsi/myrs.c cs->first_stat_mbox, cs->stat_mbox_addr);
cs 2258 drivers/scsi/myrs.c cs->first_stat_mbox = NULL;
cs 2260 drivers/scsi/myrs.c if (cs->first_cmd_mbox) {
cs 2261 drivers/scsi/myrs.c dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
cs 2262 drivers/scsi/myrs.c cs->first_cmd_mbox, cs->cmd_mbox_addr);
cs 2263 drivers/scsi/myrs.c cs->first_cmd_mbox = NULL;
cs 2267 drivers/scsi/myrs.c static void myrs_cleanup(struct myrs_hba *cs)
cs 2269 drivers/scsi/myrs.c struct pci_dev *pdev = cs->pdev;
cs 2272 drivers/scsi/myrs.c myrs_unmap(cs);
cs 2274 drivers/scsi/myrs.c if (cs->mmio_base) {
cs 2275 drivers/scsi/myrs.c cs->disable_intr(cs);
cs 2276 drivers/scsi/myrs.c iounmap(cs->mmio_base);
cs 2278 drivers/scsi/myrs.c if (cs->irq)
cs 2279 drivers/scsi/myrs.c free_irq(cs->irq, cs);
cs 2280 drivers/scsi/myrs.c if (cs->io_addr)
cs 2281 drivers/scsi/myrs.c release_region(cs->io_addr, 0x80);
cs 2282 drivers/scsi/myrs.c iounmap(cs->mmio_base);
cs 2285 drivers/scsi/myrs.c scsi_host_put(cs->host);
cs 2295 drivers/scsi/myrs.c struct myrs_hba *cs = NULL;
cs 2297 drivers/scsi/myrs.c cs = myrs_alloc_host(pdev, entry);
cs 2298 drivers/scsi/myrs.c if (!cs) {
cs 2302 drivers/scsi/myrs.c cs->pdev = pdev;
cs 2307 drivers/scsi/myrs.c cs->pci_addr = pci_resource_start(pdev, 0);
cs 2309 drivers/scsi/myrs.c pci_set_drvdata(pdev, cs);
cs 2310 drivers/scsi/myrs.c spin_lock_init(&cs->queue_lock);
cs 2314 drivers/scsi/myrs.c cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
cs 2315 drivers/scsi/myrs.c if (cs->mmio_base == NULL) {
cs 2321 drivers/scsi/myrs.c cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
cs 2322 drivers/scsi/myrs.c if (privdata->hw_init(pdev, cs, cs->io_base))
cs 2326 drivers/scsi/myrs.c if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
cs 2331 drivers/scsi/myrs.c cs->irq =
pdev->irq; cs 2332 drivers/scsi/myrs.c return cs; cs 2337 drivers/scsi/myrs.c myrs_cleanup(cs); cs 2347 drivers/scsi/myrs.c static bool myrs_err_status(struct myrs_hba *cs, unsigned char status, cs 2350 drivers/scsi/myrs.c struct pci_dev *pdev = cs->pdev; cs 2578 drivers/scsi/myrs.c struct myrs_hba *cs, void __iomem *base) cs 2590 drivers/scsi/myrs.c myrs_err_status(cs, status, parm0, parm1)) cs 2600 drivers/scsi/myrs.c if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) { cs 2607 drivers/scsi/myrs.c cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox; cs 2608 drivers/scsi/myrs.c cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd; cs 2609 drivers/scsi/myrs.c cs->disable_intr = DAC960_GEM_disable_intr; cs 2610 drivers/scsi/myrs.c cs->reset = DAC960_GEM_reset_ctrl; cs 2616 drivers/scsi/myrs.c struct myrs_hba *cs = arg; cs 2617 drivers/scsi/myrs.c void __iomem *base = cs->io_base; cs 2621 drivers/scsi/myrs.c spin_lock_irqsave(&cs->queue_lock, flags); cs 2623 drivers/scsi/myrs.c next_stat_mbox = cs->next_stat_mbox; cs 2630 drivers/scsi/myrs.c cmd_blk = &cs->dcmd_blk; cs 2632 drivers/scsi/myrs.c cmd_blk = &cs->mcmd_blk; cs 2634 drivers/scsi/myrs.c scmd = scsi_host_find_tag(cs->host, id - 3); cs 2643 drivers/scsi/myrs.c dev_err(&cs->pdev->dev, cs 2647 drivers/scsi/myrs.c if (++next_stat_mbox > cs->last_stat_mbox) cs 2648 drivers/scsi/myrs.c next_stat_mbox = cs->first_stat_mbox; cs 2652 drivers/scsi/myrs.c myrs_handle_cmdblk(cs, cmd_blk); cs 2654 drivers/scsi/myrs.c myrs_handle_scsi(cs, cmd_blk, scmd); cs 2657 drivers/scsi/myrs.c cs->next_stat_mbox = next_stat_mbox; cs 2658 drivers/scsi/myrs.c spin_unlock_irqrestore(&cs->queue_lock, flags); cs 2828 drivers/scsi/myrs.c struct myrs_hba *cs, void __iomem *base) cs 2840 drivers/scsi/myrs.c myrs_err_status(cs, status, parm0, parm1)) cs 2850 drivers/scsi/myrs.c if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) { cs 2857 drivers/scsi/myrs.c cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox; cs 2858 drivers/scsi/myrs.c cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd; cs 2859 drivers/scsi/myrs.c cs->disable_intr = DAC960_BA_disable_intr; cs 2860 drivers/scsi/myrs.c cs->reset = DAC960_BA_reset_ctrl; cs 2866 drivers/scsi/myrs.c struct myrs_hba *cs = arg; cs 2867 drivers/scsi/myrs.c void __iomem *base = cs->io_base; cs 2871 drivers/scsi/myrs.c spin_lock_irqsave(&cs->queue_lock, flags); cs 2873 drivers/scsi/myrs.c next_stat_mbox = cs->next_stat_mbox; cs 2880 drivers/scsi/myrs.c cmd_blk = &cs->dcmd_blk; cs 2882 drivers/scsi/myrs.c cmd_blk = &cs->mcmd_blk; cs 2884 drivers/scsi/myrs.c scmd = scsi_host_find_tag(cs->host, id - 3); cs 2893 drivers/scsi/myrs.c dev_err(&cs->pdev->dev, cs 2897 drivers/scsi/myrs.c if (++next_stat_mbox > cs->last_stat_mbox) cs 2898 drivers/scsi/myrs.c next_stat_mbox = cs->first_stat_mbox; cs 2902 drivers/scsi/myrs.c myrs_handle_cmdblk(cs, cmd_blk); cs 2904 drivers/scsi/myrs.c myrs_handle_scsi(cs, cmd_blk, scmd); cs 2907 drivers/scsi/myrs.c cs->next_stat_mbox = next_stat_mbox; cs 2908 drivers/scsi/myrs.c spin_unlock_irqrestore(&cs->queue_lock, flags); cs 3077 drivers/scsi/myrs.c struct myrs_hba *cs, void __iomem *base) cs 3089 drivers/scsi/myrs.c myrs_err_status(cs, status, parm0, parm1)) cs 3099 drivers/scsi/myrs.c if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) { cs 3106 drivers/scsi/myrs.c cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox; cs 3107 drivers/scsi/myrs.c cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd; cs 3108 drivers/scsi/myrs.c cs->disable_intr = DAC960_LP_disable_intr; cs 3109 drivers/scsi/myrs.c cs->reset = DAC960_LP_reset_ctrl; 
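A pattern worth noting in the myrs entries above: each controller variant's hw_init (DAC960_GEM, DAC960_BA, DAC960_LP) finishes by binding variant-specific methods (write_cmd_mbox, get_cmd_mbox, disable_intr, reset) onto the myrs_hba instance, so the core driver only ever calls through the pointers. A minimal userspace sketch of that method-pointer idiom follows; all names (struct hba, gem_*) are illustrative, not the driver's actual API.

/*
 * Illustrative sketch: hw_init binds per-variant methods once,
 * and call sites stay variant-agnostic.
 */
#include <stdio.h>

struct hba {
	void (*reset)(struct hba *h);
	void (*disable_intr)(struct hba *h);
};

static void gem_reset(struct hba *h)        { puts("GEM reset"); }
static void gem_disable_intr(struct hba *h) { puts("GEM intr off"); }

/* hw_init for one variant: fill in the instance's method pointers */
static void gem_hw_init(struct hba *h)
{
	h->reset = gem_reset;
	h->disable_intr = gem_disable_intr;
}

int main(void)
{
	struct hba h;

	gem_hw_init(&h);
	h.disable_intr(&h);	/* core code calls through pointers only */
	h.reset(&h);
	return 0;
}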
cs 3116 drivers/scsi/myrs.c struct myrs_hba *cs = arg; cs 3117 drivers/scsi/myrs.c void __iomem *base = cs->io_base; cs 3121 drivers/scsi/myrs.c spin_lock_irqsave(&cs->queue_lock, flags); cs 3123 drivers/scsi/myrs.c next_stat_mbox = cs->next_stat_mbox; cs 3130 drivers/scsi/myrs.c cmd_blk = &cs->dcmd_blk; cs 3132 drivers/scsi/myrs.c cmd_blk = &cs->mcmd_blk; cs 3134 drivers/scsi/myrs.c scmd = scsi_host_find_tag(cs->host, id - 3); cs 3143 drivers/scsi/myrs.c dev_err(&cs->pdev->dev, cs 3147 drivers/scsi/myrs.c if (++next_stat_mbox > cs->last_stat_mbox) cs 3148 drivers/scsi/myrs.c next_stat_mbox = cs->first_stat_mbox; cs 3152 drivers/scsi/myrs.c myrs_handle_cmdblk(cs, cmd_blk); cs 3154 drivers/scsi/myrs.c myrs_handle_scsi(cs, cmd_blk, scmd); cs 3157 drivers/scsi/myrs.c cs->next_stat_mbox = next_stat_mbox; cs 3158 drivers/scsi/myrs.c spin_unlock_irqrestore(&cs->queue_lock, flags); cs 3174 drivers/scsi/myrs.c struct myrs_hba *cs; cs 3177 drivers/scsi/myrs.c cs = myrs_detect(dev, entry); cs 3178 drivers/scsi/myrs.c if (!cs) cs 3181 drivers/scsi/myrs.c ret = myrs_get_config(cs); cs 3183 drivers/scsi/myrs.c myrs_cleanup(cs); cs 3187 drivers/scsi/myrs.c if (!myrs_create_mempools(dev, cs)) { cs 3192 drivers/scsi/myrs.c ret = scsi_add_host(cs->host, &dev->dev); cs 3195 drivers/scsi/myrs.c myrs_destroy_mempools(cs); cs 3198 drivers/scsi/myrs.c scsi_scan_host(cs->host); cs 3201 drivers/scsi/myrs.c myrs_cleanup(cs); cs 3208 drivers/scsi/myrs.c struct myrs_hba *cs = pci_get_drvdata(pdev); cs 3210 drivers/scsi/myrs.c if (cs == NULL) cs 3213 drivers/scsi/myrs.c shost_printk(KERN_NOTICE, cs->host, "Flushing Cache..."); cs 3214 drivers/scsi/myrs.c myrs_flush_cache(cs); cs 3215 drivers/scsi/myrs.c myrs_destroy_mempools(cs); cs 3216 drivers/scsi/myrs.c myrs_cleanup(cs); cs 2215 drivers/soc/fsl/qbman/qman.c *result = !!query_cgr.cgr.cs; cs 136 drivers/spi/spi-armada-3700.c static void a3700_spi_activate_cs(struct a3700_spi *a3700_spi, unsigned int cs) cs 141 drivers/spi/spi-armada-3700.c val |= (A3700_SPI_EN << cs); cs 146 drivers/spi/spi-armada-3700.c unsigned int cs) cs 151 drivers/spi/spi-armada-3700.c val &= ~(A3700_SPI_EN << cs); cs 67 drivers/spi/spi-axi-spi-engine.c #define SPI_ENGINE_CMD_ASSERT(delay, cs) \ cs 68 drivers/spi/spi-axi-spi-engine.c SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs)) cs 507 drivers/spi/spi-bcm-qspi.c static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) cs 514 drivers/spi/spi-bcm-qspi.c wr = (rd & ~0xff) | (1 << cs); cs 521 drivers/spi/spi-bcm-qspi.c dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs); cs 522 drivers/spi/spi-bcm-qspi.c qspi->curr_cs = cs; cs 341 drivers/spi/spi-bcm2835.c u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); cs 344 drivers/spi/spi-bcm2835.c cs &= ~(BCM2835_SPI_CS_INTR | cs 354 drivers/spi/spi-bcm2835.c cs |= BCM2835_SPI_CS_DONE; cs 356 drivers/spi/spi-bcm2835.c cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX; cs 359 drivers/spi/spi-bcm2835.c bcm2835_wr(bs, BCM2835_SPI_CS, cs); cs 368 drivers/spi/spi-bcm2835.c u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); cs 374 drivers/spi/spi-bcm2835.c if (cs & BCM2835_SPI_CS_RXF) cs 376 drivers/spi/spi-bcm2835.c else if (cs & BCM2835_SPI_CS_RXR) cs 379 drivers/spi/spi-bcm2835.c if (bs->tx_len && cs & BCM2835_SPI_CS_DONE) cs 400 drivers/spi/spi-bcm2835.c u32 cs, bool fifo_empty) cs 411 drivers/spi/spi-bcm2835.c bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); cs 419 drivers/spi/spi-bcm2835.c cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA; cs 420 drivers/spi/spi-bcm2835.c 
bcm2835_wr(bs, BCM2835_SPI_CS, cs); cs 475 drivers/spi/spi-bcm2835.c u32 cs) cs 508 drivers/spi/spi-bcm2835.c bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA cs 513 drivers/spi/spi-bcm2835.c bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX cs 535 drivers/spi/spi-bcm2835.c bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA cs 539 drivers/spi/spi-bcm2835.c bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX cs 761 drivers/spi/spi-bcm2835.c u32 cs) cs 774 drivers/spi/spi-bcm2835.c bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs); cs 791 drivers/spi/spi-bcm2835.c cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN); cs 1014 drivers/spi/spi-bcm2835.c u32 cs) cs 1023 drivers/spi/spi-bcm2835.c bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); cs 1056 drivers/spi/spi-bcm2835.c tfr, cs, false); cs 1073 drivers/spi/spi-bcm2835.c u32 cs = bs->prepare_cs[spi->chip_select]; cs 1096 drivers/spi/spi-bcm2835.c cs |= BCM2835_SPI_CS_REN; cs 1115 drivers/spi/spi-bcm2835.c return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs); cs 1122 drivers/spi/spi-bcm2835.c return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs); cs 1125 drivers/spi/spi-bcm2835.c return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true); cs 1183 drivers/spi/spi-bcm2835.c u32 cs; cs 1191 drivers/spi/spi-bcm2835.c cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; cs 1193 drivers/spi/spi-bcm2835.c cs |= BCM2835_SPI_CS_CPOL; cs 1195 drivers/spi/spi-bcm2835.c cs |= BCM2835_SPI_CS_CPHA; cs 1196 drivers/spi/spi-bcm2835.c bs->prepare_cs[spi->chip_select] = cs; cs 1203 drivers/spi/spi-bcm2835.c bs->clear_rx_cs[spi->chip_select] = cs | cs 112 drivers/spi/spi-bcm63xx-hsspi.c static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned int cs, cs 120 drivers/spi/spi-bcm63xx-hsspi.c reg &= ~BIT(cs); cs 121 drivers/spi/spi-bcm63xx-hsspi.c if (active == !(bs->cs_polarity & BIT(cs))) cs 122 drivers/spi/spi-bcm63xx-hsspi.c reg |= BIT(cs); cs 140 drivers/spi/spi-bitbang.c struct spi_bitbang_cs *cs = spi->controller_state; cs 156 drivers/spi/spi-bitbang.c cs->txrx_bufs = bitbang_txrx_8; cs 158 drivers/spi/spi-bitbang.c cs->txrx_bufs = bitbang_txrx_16; cs 160 drivers/spi/spi-bitbang.c cs->txrx_bufs = bitbang_txrx_32; cs 168 drivers/spi/spi-bitbang.c cs->nsecs = (1000000000/2) / hz; cs 169 drivers/spi/spi-bitbang.c if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000)) cs 182 drivers/spi/spi-bitbang.c struct spi_bitbang_cs *cs = spi->controller_state; cs 187 drivers/spi/spi-bitbang.c if (!cs) { cs 188 drivers/spi/spi-bitbang.c cs = kzalloc(sizeof(*cs), GFP_KERNEL); cs 189 drivers/spi/spi-bitbang.c if (!cs) cs 191 drivers/spi/spi-bitbang.c spi->controller_state = cs; cs 195 drivers/spi/spi-bitbang.c cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; cs 196 drivers/spi/spi-bitbang.c if (!cs->txrx_word) cs 205 drivers/spi/spi-bitbang.c dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); cs 222 drivers/spi/spi-bitbang.c struct spi_bitbang_cs *cs = spi->controller_state; cs 223 drivers/spi/spi-bitbang.c unsigned nsecs = cs->nsecs; cs 239 drivers/spi/spi-bitbang.c return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags); cs 241 drivers/spi/spi-bitbang.c return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, 0); cs 94 drivers/spi/spi-dln2.c u8 cs; cs 135 drivers/spi/spi-dln2.c u8 cs; cs 145 drivers/spi/spi-dln2.c tx.cs = ~cs_mask; cs 153 drivers/spi/spi-dln2.c static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs) cs 155 drivers/spi/spi-dln2.c return dln2_spi_cs_set(dln2, BIT(cs)); cs 165 
drivers/spi/spi-dln2.c u8 cs; cs 170 drivers/spi/spi-dln2.c tx.cs = cs_mask; cs 597 drivers/spi/spi-dln2.c if (dln2->cs != spi->chip_select) { cs 602 drivers/spi/spi-dln2.c dln2->cs = spi->chip_select; cs 705 drivers/spi/spi-dln2.c dln2->cs = 0xff; cs 815 drivers/spi/spi-dln2.c dln2->cs = 0xff; cs 63 drivers/spi/spi-dw-mmio.c u32 cs = spi->chip_select; cs 65 drivers/spi/spi-dw-mmio.c if (cs < 4) { cs 69 drivers/spi/spi-dw-mmio.c sw_mode |= MSCC_SPI_MST_SW_MODE_SW_SPI_CS(BIT(cs)); cs 329 drivers/spi/spi-fsl-espi.c struct fsl_espi_cs *cs = spi_get_ctldata(spi); cs 330 drivers/spi/spi-fsl-espi.c u32 hw_mode_old = cs->hw_mode; cs 333 drivers/spi/spi-fsl-espi.c cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); cs 335 drivers/spi/spi-fsl-espi.c cs->hw_mode |= CSMODE_LEN(bits_per_word - 1); cs 340 drivers/spi/spi-fsl-espi.c cs->hw_mode |= CSMODE_DIV16; cs 344 drivers/spi/spi-fsl-espi.c cs->hw_mode |= CSMODE_PM(pm); cs 347 drivers/spi/spi-fsl-espi.c if (cs->hw_mode != hw_mode_old) cs 349 drivers/spi/spi-fsl-espi.c cs->hw_mode); cs 480 drivers/spi/spi-fsl-espi.c struct fsl_espi_cs *cs = spi_get_ctldata(spi); cs 482 drivers/spi/spi-fsl-espi.c if (!cs) { cs 483 drivers/spi/spi-fsl-espi.c cs = kzalloc(sizeof(*cs), GFP_KERNEL); cs 484 drivers/spi/spi-fsl-espi.c if (!cs) cs 486 drivers/spi/spi-fsl-espi.c spi_set_ctldata(spi, cs); cs 493 drivers/spi/spi-fsl-espi.c cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select)); cs 495 drivers/spi/spi-fsl-espi.c cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH cs 499 drivers/spi/spi-fsl-espi.c cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK; cs 501 drivers/spi/spi-fsl-espi.c cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; cs 503 drivers/spi/spi-fsl-espi.c cs->hw_mode |= CSMODE_REV; cs 522 drivers/spi/spi-fsl-espi.c struct fsl_espi_cs *cs = spi_get_ctldata(spi); cs 524 drivers/spi/spi-fsl-espi.c kfree(cs); cs 619 drivers/spi/spi-fsl-espi.c u32 csmode, cs, prop; cs 631 drivers/spi/spi-fsl-espi.c ret = of_property_read_u32(nc, "reg", &cs); cs 632 drivers/spi/spi-fsl-espi.c if (ret || cs >= master->num_chipselect) cs 651 drivers/spi/spi-fsl-espi.c fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode); cs 654 drivers/spi/spi-fsl-espi.c dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode); cs 92 drivers/spi/spi-fsl-spi.c struct spi_mpc8xxx_cs *cs = spi->controller_state; cs 97 drivers/spi/spi-fsl-spi.c if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) cs 104 drivers/spi/spi-fsl-spi.c mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE); cs 110 drivers/spi/spi-fsl-spi.c mpc8xxx_spi_write_reg(mode, cs->hw_mode); cs 119 drivers/spi/spi-fsl-spi.c struct spi_mpc8xxx_cs *cs = spi->controller_state; cs 129 drivers/spi/spi-fsl-spi.c mpc8xxx_spi->rx_shift = cs->rx_shift; cs 130 drivers/spi/spi-fsl-spi.c mpc8xxx_spi->tx_shift = cs->tx_shift; cs 131 drivers/spi/spi-fsl-spi.c mpc8xxx_spi->get_rx = cs->get_rx; cs 132 drivers/spi/spi-fsl-spi.c mpc8xxx_spi->get_tx = cs->get_tx; cs 175 drivers/spi/spi-fsl-spi.c static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, cs 180 drivers/spi/spi-fsl-spi.c cs->rx_shift = 0; cs 181 drivers/spi/spi-fsl-spi.c cs->tx_shift = 0; cs 183 drivers/spi/spi-fsl-spi.c cs->get_rx = mpc8xxx_spi_rx_buf_u8; cs 184 drivers/spi/spi-fsl-spi.c cs->get_tx = mpc8xxx_spi_tx_buf_u8; cs 186 drivers/spi/spi-fsl-spi.c cs->get_rx = mpc8xxx_spi_rx_buf_u16; cs 187 drivers/spi/spi-fsl-spi.c cs->get_tx = mpc8xxx_spi_tx_buf_u16; cs 189 drivers/spi/spi-fsl-spi.c cs->get_rx = mpc8xxx_spi_rx_buf_u32; cs 190 drivers/spi/spi-fsl-spi.c 
cs->get_tx = mpc8xxx_spi_tx_buf_u32; cs 195 drivers/spi/spi-fsl-spi.c mpc8xxx_spi->set_shifts(&cs->rx_shift, &cs->tx_shift, cs 199 drivers/spi/spi-fsl-spi.c mpc8xxx_spi->rx_shift = cs->rx_shift; cs 200 drivers/spi/spi-fsl-spi.c mpc8xxx_spi->tx_shift = cs->tx_shift; cs 201 drivers/spi/spi-fsl-spi.c mpc8xxx_spi->get_rx = cs->get_rx; cs 202 drivers/spi/spi-fsl-spi.c mpc8xxx_spi->get_tx = cs->get_tx; cs 207 drivers/spi/spi-fsl-spi.c static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, cs 232 drivers/spi/spi-fsl-spi.c struct spi_mpc8xxx_cs *cs = spi->controller_state; cs 249 drivers/spi/spi-fsl-spi.c bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi, cs 253 drivers/spi/spi-fsl-spi.c bits_per_word = mspi_apply_qe_mode_quirks(cs, spi, cs 265 drivers/spi/spi-fsl-spi.c cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16 cs 268 drivers/spi/spi-fsl-spi.c cs->hw_mode |= SPMODE_LEN(bits_per_word); cs 271 drivers/spi/spi-fsl-spi.c cs->hw_mode |= SPMODE_DIV16; cs 284 drivers/spi/spi-fsl-spi.c cs->hw_mode |= SPMODE_PM(pm); cs 447 drivers/spi/spi-fsl-spi.c struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); cs 452 drivers/spi/spi-fsl-spi.c if (!cs) { cs 453 drivers/spi/spi-fsl-spi.c cs = kzalloc(sizeof(*cs), GFP_KERNEL); cs 454 drivers/spi/spi-fsl-spi.c if (!cs) cs 456 drivers/spi/spi-fsl-spi.c spi_set_ctldata(spi, cs); cs 462 drivers/spi/spi-fsl-spi.c hw_mode = cs->hw_mode; /* Save original settings */ cs 463 drivers/spi/spi-fsl-spi.c cs->hw_mode = mpc8xxx_spi_read_reg(&reg_base->mode); cs 465 drivers/spi/spi-fsl-spi.c cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH cs 469 drivers/spi/spi-fsl-spi.c cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK; cs 471 drivers/spi/spi-fsl-spi.c cs->hw_mode |= SPMODE_CI_INACTIVEHIGH; cs 473 drivers/spi/spi-fsl-spi.c cs->hw_mode |= SPMODE_REV; cs 475 drivers/spi/spi-fsl-spi.c cs->hw_mode |= SPMODE_LOOP; cs 479 drivers/spi/spi-fsl-spi.c cs->hw_mode = hw_mode; /* Restore settings */ cs 491 drivers/spi/spi-fsl-spi.c struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); cs 493 drivers/spi/spi-fsl-spi.c kfree(cs); cs 556 drivers/spi/spi-fsl-spi.c u16 cs = spi->chip_select; cs 560 drivers/spi/spi-fsl-spi.c } else if (cs < mpc8xxx_spi->native_chipselects) { cs 562 drivers/spi/spi-fsl-spi.c slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs)); cs 209 drivers/spi/spi-gpio.c struct gpio_desc *cs = spi_gpio->cs_gpios[spi->chip_select]; cs 212 drivers/spi/spi-gpio.c gpiod_set_value_cansleep(cs, (spi->mode & SPI_CS_HIGH) ?
is_active : !is_active); cs 218 drivers/spi/spi-gpio.c struct gpio_desc *cs; cs 227 drivers/spi/spi-gpio.c cs = spi_gpio->cs_gpios[spi->chip_select]; cs 228 drivers/spi/spi-gpio.c if (!spi->controller_state && cs) cs 229 drivers/spi/spi-gpio.c status = gpiod_direction_output(cs, cs 251 drivers/spi/spi-imx.c #define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18) cs 256 drivers/spi/spi-imx.c #define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) cs 257 drivers/spi/spi-imx.c #define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4)) cs 258 drivers/spi/spi-imx.c #define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) cs 259 drivers/spi/spi-imx.c #define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) cs 260 drivers/spi/spi-imx.c #define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20)) cs 390 drivers/spi/spi-lantiq-ssc.c unsigned int cs = spidev->chip_select; cs 397 drivers/spi/spi-lantiq-ssc.c dev_dbg(spi->dev, "using internal chipselect %u\n", cs); cs 399 drivers/spi/spi-lantiq-ssc.c if (cs < spi->base_cs) { cs 401 drivers/spi/spi-lantiq-ssc.c "chipselect %i too small (min %i)\n", cs, spi->base_cs); cs 406 drivers/spi/spi-lantiq-ssc.c gpocon = 1 << ((cs - spi->base_cs) + LTQ_SPI_GPOCON_ISCSBN_S); cs 410 drivers/spi/spi-lantiq-ssc.c gpocon |= 1 << (cs - spi->base_cs); cs 756 drivers/spi/spi-lantiq-ssc.c unsigned int cs = spidev->chip_select; cs 760 drivers/spi/spi-lantiq-ssc.c fgpo = (1 << (cs - spi->base_cs)); cs 762 drivers/spi/spi-lantiq-ssc.c fgpo = (1 << (cs - spi->base_cs + LTQ_SPI_FGPO_SETOUTN_S)); cs 82 drivers/spi/spi-mpc512x-psc.c struct mpc512x_psc_spi_cs *cs = spi->controller_state; cs 84 drivers/spi/spi-mpc512x-psc.c cs->speed_hz = (t && t->speed_hz) cs 86 drivers/spi/spi-mpc512x-psc.c cs->bits_per_word = (t && t->bits_per_word) cs 88 drivers/spi/spi-mpc512x-psc.c cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8; cs 94 drivers/spi/spi-mpc512x-psc.c struct mpc512x_psc_spi_cs *cs = spi->controller_state; cs 122 drivers/spi/spi-mpc512x-psc.c speed = cs->speed_hz; cs 129 drivers/spi/spi-mpc512x-psc.c mps->bits_per_word = cs->bits_per_word; cs 366 drivers/spi/spi-mpc512x-psc.c struct mpc512x_psc_spi_cs *cs = spi->controller_state; cs 372 drivers/spi/spi-mpc512x-psc.c if (!cs) { cs 373 drivers/spi/spi-mpc512x-psc.c cs = kzalloc(sizeof *cs, GFP_KERNEL); cs 374 drivers/spi/spi-mpc512x-psc.c if (!cs) cs 382 drivers/spi/spi-mpc512x-psc.c kfree(cs); cs 389 drivers/spi/spi-mpc512x-psc.c spi->controller_state = cs; cs 392 drivers/spi/spi-mpc512x-psc.c cs->bits_per_word = spi->bits_per_word; cs 393 drivers/spi/spi-mpc512x-psc.c cs->speed_hz = spi->max_speed_hz; cs 61 drivers/spi/spi-mpc52xx-psc.c struct mpc52xx_psc_spi_cs *cs = spi->controller_state; cs 63 drivers/spi/spi-mpc52xx-psc.c cs->speed_hz = (t && t->speed_hz) cs 65 drivers/spi/spi-mpc52xx-psc.c cs->bits_per_word = (t && t->bits_per_word) cs 67 drivers/spi/spi-mpc52xx-psc.c cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8; cs 73 drivers/spi/spi-mpc52xx-psc.c struct mpc52xx_psc_spi_cs *cs = spi->controller_state; cs 103 drivers/spi/spi-mpc52xx-psc.c if (cs->speed_hz) cs 104 drivers/spi/spi-mpc52xx-psc.c ccr |= (MCLK / cs->speed_hz - 1) & 0xFF; cs 108 drivers/spi/spi-mpc52xx-psc.c mps->bits_per_word = cs->bits_per_word; cs 262 drivers/spi/spi-mpc52xx-psc.c struct mpc52xx_psc_spi_cs *cs = spi->controller_state; cs 268 drivers/spi/spi-mpc52xx-psc.c if (!cs) { cs 269 drivers/spi/spi-mpc52xx-psc.c cs = kzalloc(sizeof *cs, GFP_KERNEL); cs 270 drivers/spi/spi-mpc52xx-psc.c if (!cs) cs 272 drivers/spi/spi-mpc52xx-psc.c spi->controller_state = cs; 
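The setup() entries above (spi-bitbang, spi-fsl-espi, spi-fsl-spi, spi-mpc512x-psc, spi-mpc52xx-psc) share one idiom: allocate the per-chip-select state lazily on the first setup() call, stash it in controller_state (or ctldata), reuse it on later calls, and free it in cleanup(). A userspace sketch of that lifecycle, with illustrative names only, follows; kernel equivalents are noted in comments.

/*
 * Illustrative sketch of the lazy controller_state allocation idiom.
 */
#include <stdlib.h>

struct spi_dev {
	void *controller_state;
};

struct cs_state {
	unsigned int hz;
	unsigned int bits_per_word;
};

static int setup(struct spi_dev *spi)
{
	struct cs_state *cs = spi->controller_state;

	if (!cs) {
		cs = calloc(1, sizeof(*cs));	/* kzalloc() in the kernel */
		if (!cs)
			return -1;		/* -ENOMEM in the kernel */
		spi->controller_state = cs;
	}
	cs->hz = 1000000;			/* (re)apply settings each call */
	cs->bits_per_word = 8;
	return 0;
}

static void cleanup(struct spi_dev *spi)
{
	free(spi->controller_state);		/* kfree(cs) in the kernel */
	spi->controller_state = NULL;
}

int main(void)
{
	struct spi_dev spi = { 0 };

	setup(&spi);	/* first call allocates */
	setup(&spi);	/* later calls reuse the same state */
	cleanup(&spi);
	return 0;
}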
cs 275 drivers/spi/spi-mpc52xx-psc.c cs->bits_per_word = spi->bits_per_word; cs 276 drivers/spi/spi-mpc52xx-psc.c cs->speed_hz = spi->max_speed_hz; cs 97 drivers/spi/spi-mpc52xx.c int cs; cs 100 drivers/spi/spi-mpc52xx.c cs = ms->message->spi->chip_select; cs 101 drivers/spi/spi-mpc52xx.c gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1); cs 80 drivers/spi/spi-mt7621.c int cs = spi->chip_select; cs 97 drivers/spi/spi-mt7621.c polar = BIT(cs); cs 105 drivers/spi/spi-mxs.c static u32 mxs_spi_cs_to_reg(unsigned cs) cs 117 drivers/spi/spi-mxs.c if (cs & 1) cs 119 drivers/spi/spi-mxs.c if (cs & 2) cs 181 drivers/spi/spi-omap-100k.c struct omap1_spi100k_cs *cs = spi->controller_state; cs 187 drivers/spi/spi-omap-100k.c word_len = cs->word_len; cs 237 drivers/spi/spi-omap-100k.c struct omap1_spi100k_cs *cs = spi->controller_state; cs 247 drivers/spi/spi-omap-100k.c cs->word_len = word_len; cs 264 drivers/spi/spi-omap-100k.c struct omap1_spi100k_cs *cs = spi->controller_state; cs 268 drivers/spi/spi-omap-100k.c if (!cs) { cs 269 drivers/spi/spi-omap-100k.c cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL); cs 270 drivers/spi/spi-omap-100k.c if (!cs) cs 272 drivers/spi/spi-omap-100k.c cs->base = spi100k->base + spi->chip_select * 0x14; cs 273 drivers/spi/spi-omap-100k.c spi->controller_state = cs; cs 118 drivers/spi/spi-omap-uwire.c static inline void omap_uwire_configure_mode(u8 cs, unsigned long flags) cs 126 drivers/spi/spi-omap-uwire.c if (cs & 1) cs 130 drivers/spi/spi-omap-uwire.c if (cs <= 1) cs 117 drivers/spi/spi-omap2-mcspi.c struct list_head cs; cs 163 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 165 drivers/spi/spi-omap2-mcspi.c writel_relaxed(val, cs->base + idx); cs 170 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 172 drivers/spi/spi-omap2-mcspi.c return readl_relaxed(cs->base + idx); cs 177 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 179 drivers/spi/spi-omap2-mcspi.c return cs->chconf0; cs 184 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 186 drivers/spi/spi-omap2-mcspi.c cs->chconf0 = val; cs 223 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 226 drivers/spi/spi-omap2-mcspi.c l = cs->chctrl0; cs 231 drivers/spi/spi-omap2-mcspi.c cs->chctrl0 = l; cs 232 drivers/spi/spi-omap2-mcspi.c mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0); cs 297 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 307 drivers/spi/spi-omap2-mcspi.c bytes_per_word = mcspi_bytes_per_word(cs->word_len); cs 440 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 441 drivers/spi/spi-omap2-mcspi.c void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; cs 455 drivers/spi/spi-omap2-mcspi.c word_len = cs->word_len; cs 584 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 599 drivers/spi/spi-omap2-mcspi.c if (cs->word_len <= 8) { cs 602 drivers/spi/spi-omap2-mcspi.c } else if (cs->word_len <= 16) { cs 613 drivers/spi/spi-omap2-mcspi.c cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0; cs 614 drivers/spi/spi-omap2-mcspi.c cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; cs 668 drivers/spi/spi-omap2-mcspi.c chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; cs 692 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 695 drivers/spi/spi-omap2-mcspi.c void __iomem *base = cs->base; cs 703 
drivers/spi/spi-omap2-mcspi.c word_len = cs->word_len; cs 895 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 906 drivers/spi/spi-omap2-mcspi.c cs->word_len = word_len; cs 957 drivers/spi/spi-omap2-mcspi.c cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK; cs 958 drivers/spi/spi-omap2-mcspi.c cs->chctrl0 |= extclk << 8; cs 959 drivers/spi/spi-omap2-mcspi.c mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0); cs 974 drivers/spi/spi-omap2-mcspi.c cs->mode = spi->mode; cs 1042 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs = spi->controller_state; cs 1044 drivers/spi/spi-omap2-mcspi.c if (!cs) { cs 1045 drivers/spi/spi-omap2-mcspi.c cs = kzalloc(sizeof *cs, GFP_KERNEL); cs 1046 drivers/spi/spi-omap2-mcspi.c if (!cs) cs 1048 drivers/spi/spi-omap2-mcspi.c cs->base = mcspi->base + spi->chip_select * 0x14; cs 1049 drivers/spi/spi-omap2-mcspi.c cs->phys = mcspi->phys + spi->chip_select * 0x14; cs 1050 drivers/spi/spi-omap2-mcspi.c cs->mode = 0; cs 1051 drivers/spi/spi-omap2-mcspi.c cs->chconf0 = 0; cs 1052 drivers/spi/spi-omap2-mcspi.c cs->chctrl0 = 0; cs 1053 drivers/spi/spi-omap2-mcspi.c spi->controller_state = cs; cs 1055 drivers/spi/spi-omap2-mcspi.c list_add_tail(&cs->node, &ctx->cs); cs 1084 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs; cs 1088 drivers/spi/spi-omap2-mcspi.c cs = spi->controller_state; cs 1089 drivers/spi/spi-omap2-mcspi.c list_del(&cs->node); cs 1091 drivers/spi/spi-omap2-mcspi.c kfree(cs); cs 1142 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs; cs 1150 drivers/spi/spi-omap2-mcspi.c cs = spi->controller_state; cs 1160 drivers/spi/spi-omap2-mcspi.c if (spi->mode != cs->mode) cs 1198 drivers/spi/spi-omap2-mcspi.c if (t->len > ((cs->word_len + 7) >> 3)) cs 1216 drivers/spi/spi-omap2-mcspi.c writel_relaxed(0, cs->base cs 1268 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs; cs 1275 drivers/spi/spi-omap2-mcspi.c list_for_each_entry(cs, &ctx->cs, node) { cs 1276 drivers/spi/spi-omap2-mcspi.c if (msg->spi->controller_state == cs) cs 1279 drivers/spi/spi-omap2-mcspi.c if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) { cs 1280 drivers/spi/spi-omap2-mcspi.c cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE; cs 1281 drivers/spi/spi-omap2-mcspi.c writel_relaxed(cs->chconf0, cs 1282 drivers/spi/spi-omap2-mcspi.c cs->base + OMAP2_MCSPI_CHCONF0); cs 1283 drivers/spi/spi-omap2-mcspi.c readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0); cs 1343 drivers/spi/spi-omap2-mcspi.c struct omap2_mcspi_cs *cs; cs 1349 drivers/spi/spi-omap2-mcspi.c list_for_each_entry(cs, &ctx->cs, node) { cs 1354 drivers/spi/spi-omap2-mcspi.c if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) { cs 1355 drivers/spi/spi-omap2-mcspi.c cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE; cs 1356 drivers/spi/spi-omap2-mcspi.c writel_relaxed(cs->chconf0, cs 1357 drivers/spi/spi-omap2-mcspi.c cs->base + OMAP2_MCSPI_CHCONF0); cs 1358 drivers/spi/spi-omap2-mcspi.c cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE; cs 1359 drivers/spi/spi-omap2-mcspi.c writel_relaxed(cs->chconf0, cs 1360 drivers/spi/spi-omap2-mcspi.c cs->base + OMAP2_MCSPI_CHCONF0); cs 1362 drivers/spi/spi-omap2-mcspi.c writel_relaxed(cs->chconf0, cs 1363 drivers/spi/spi-omap2-mcspi.c cs->base + OMAP2_MCSPI_CHCONF0); cs 1456 drivers/spi/spi-omap2-mcspi.c INIT_LIST_HEAD(&mcspi->ctx.cs); cs 64 drivers/spi/spi-orion.c #define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \ cs 328 drivers/spi/spi-orion.c int cs; cs 333 drivers/spi/spi-orion.c cs = orion_spi->unused_hw_gpio; cs 335 drivers/spi/spi-orion.c cs = spi->chip_select; 
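Many of the SPI controller entries in this stretch (spi-armada-3700, spi-bcm63xx-hsspi, spi-mt7621, spi-xcomm, spi-xilinx) treat each chip select as one bit of a control register, computed as BIT(cs) and set or cleared with a read-modify-write. A self-contained sketch of that idiom follows; the register is simulated as a plain variable and all names are illustrative.

/*
 * Illustrative sketch of the BIT(cs) read-modify-write idiom.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static uint32_t ctrl_reg;	/* stands in for the memory-mapped register */

static void spi_set_cs(unsigned int cs, int activate)
{
	uint32_t val = ctrl_reg;	/* readl() in a real driver */

	if (activate)
		val |= BIT(cs);
	else
		val &= ~BIT(cs);
	ctrl_reg = val;			/* writel() in a real driver */
}

int main(void)
{
	spi_set_cs(2, 1);
	printf("reg=0x%08x\n", ctrl_reg);	/* prints 0x00000004 */
	spi_set_cs(2, 0);
	printf("reg=0x%08x\n", ctrl_reg);	/* prints 0x00000000 */
	return 0;
}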
cs 339 drivers/spi/spi-orion.c ORION_SPI_CS(cs)); cs 430 drivers/spi/spi-orion.c int cs = spi->chip_select; cs 442 drivers/spi/spi-orion.c vaddr = orion_spi->child[cs].direct_access.vaddr; cs 688 drivers/spi/spi-orion.c u32 cs; cs 692 drivers/spi/spi-orion.c status = of_property_read_u32(np, "reg", &cs); cs 708 drivers/spi/spi-orion.c cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", cs); cs 716 drivers/spi/spi-orion.c cs); cs 717 drivers/spi/spi-orion.c spi->unused_hw_gpio = cs; cs 721 drivers/spi/spi-orion.c "%s-CS%d", dev_name(&pdev->dev), cs); cs 733 drivers/spi/spi-orion.c "Can't request GPIO for CS %d\n", cs); cs 745 drivers/spi/spi-orion.c status = of_address_to_resource(pdev->dev.of_node, cs + 1, r); cs 755 drivers/spi/spi-orion.c dir_acc = &spi->child[cs].direct_access; cs 763 drivers/spi/spi-orion.c dev_info(&pdev->dev, "CS%d configured for direct access\n", cs); cs 169 drivers/spi/spi-ppc4xx.c struct spi_ppc4xx_cs *cs = spi->controller_state; cs 197 drivers/spi/spi-ppc4xx.c out_8(&hw->regs->mode, cs->mode); cs 222 drivers/spi/spi-ppc4xx.c struct spi_ppc4xx_cs *cs = spi->controller_state; cs 229 drivers/spi/spi-ppc4xx.c if (cs == NULL) { cs 230 drivers/spi/spi-ppc4xx.c cs = kzalloc(sizeof *cs, GFP_KERNEL); cs 231 drivers/spi/spi-ppc4xx.c if (!cs) cs 233 drivers/spi/spi-ppc4xx.c spi->controller_state = cs; cs 240 drivers/spi/spi-ppc4xx.c cs->mode = SPI_PPC4XX_MODE_SPE; cs 244 drivers/spi/spi-ppc4xx.c cs->mode |= SPI_CLK_MODE0; cs 247 drivers/spi/spi-ppc4xx.c cs->mode |= SPI_CLK_MODE1; cs 250 drivers/spi/spi-ppc4xx.c cs->mode |= SPI_CLK_MODE2; cs 253 drivers/spi/spi-ppc4xx.c cs->mode |= SPI_CLK_MODE3; cs 258 drivers/spi/spi-ppc4xx.c cs->mode |= SPI_PPC4XX_MODE_RD; cs 266 drivers/spi/spi-ppc4xx.c unsigned int cs = spi->chip_select; cs 274 drivers/spi/spi-ppc4xx.c if (!hw->master->num_chipselect || hw->gpios[cs] == -EEXIST) cs 281 drivers/spi/spi-ppc4xx.c gpio_set_value(hw->gpios[cs], cspol); cs 347 drivers/spi/spi-pxa2xx.c u32 value, cs; cs 354 drivers/spi/spi-pxa2xx.c cs = spi->chip_select; cs 355 drivers/spi/spi-pxa2xx.c cs <<= config->cs_sel_shift; cs 356 drivers/spi/spi-pxa2xx.c if (cs != (value & config->cs_sel_mask)) { cs 365 drivers/spi/spi-pxa2xx.c value |= cs; cs 1619 drivers/spi/spi-pxa2xx.c unsigned int cs) cs 1632 drivers/spi/spi-pxa2xx.c return cs - 1; cs 1639 drivers/spi/spi-pxa2xx.c return cs; cs 67 drivers/spi/spi-s3c24xx.c int cs, int pol); cs 88 drivers/spi/spi-s3c24xx.c static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol) cs 95 drivers/spi/spi-s3c24xx.c struct s3c24xx_spi_devstate *cs = spi->controller_state; cs 104 drivers/spi/spi-s3c24xx.c writeb(cs->spcon, hw->regs + S3C2410_SPCON); cs 108 drivers/spi/spi-s3c24xx.c writeb(cs->spcon | S3C2410_SPCON_ENSCK, cs 119 drivers/spi/spi-s3c24xx.c struct s3c24xx_spi_devstate *cs = spi->controller_state; cs 129 drivers/spi/spi-s3c24xx.c if (spi->mode != cs->mode) { cs 138 drivers/spi/spi-s3c24xx.c cs->mode = spi->mode; cs 139 drivers/spi/spi-s3c24xx.c cs->spcon = spcon; cs 142 drivers/spi/spi-s3c24xx.c if (cs->hz != hz) { cs 152 drivers/spi/spi-s3c24xx.c cs->hz = hz; cs 153 drivers/spi/spi-s3c24xx.c cs->sppre = div; cs 162 drivers/spi/spi-s3c24xx.c struct s3c24xx_spi_devstate *cs = spi->controller_state; cs 168 drivers/spi/spi-s3c24xx.c writeb(cs->sppre, hw->regs + S3C2410_SPPRE); cs 175 drivers/spi/spi-s3c24xx.c struct s3c24xx_spi_devstate *cs = spi->controller_state; cs 180 drivers/spi/spi-s3c24xx.c if (!cs) { cs 181 drivers/spi/spi-s3c24xx.c cs = devm_kzalloc(&spi->dev, cs 184 
drivers/spi/spi-s3c24xx.c if (!cs) cs 187 drivers/spi/spi-s3c24xx.c cs->spcon = SPCON_DEFAULT; cs 188 drivers/spi/spi-s3c24xx.c cs->hz = -1; cs 189 drivers/spi/spi-s3c24xx.c spi->controller_state = cs; cs 624 drivers/spi/spi-s3c64xx.c struct s3c64xx_spi_csinfo *cs = spi->controller_data; cs 627 drivers/spi/spi-s3c64xx.c writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK); cs 739 drivers/spi/spi-s3c64xx.c struct s3c64xx_spi_csinfo *cs; cs 755 drivers/spi/spi-s3c64xx.c cs = kzalloc(sizeof(*cs), GFP_KERNEL); cs 756 drivers/spi/spi-s3c64xx.c if (!cs) { cs 762 drivers/spi/spi-s3c64xx.c cs->fb_delay = fb_delay; cs 764 drivers/spi/spi-s3c64xx.c return cs; cs 775 drivers/spi/spi-s3c64xx.c struct s3c64xx_spi_csinfo *cs = spi->controller_data; cs 781 drivers/spi/spi-s3c64xx.c cs = s3c64xx_get_slave_ctrldata(spi); cs 782 drivers/spi/spi-s3c64xx.c spi->controller_data = cs; cs 783 drivers/spi/spi-s3c64xx.c } else if (cs) { cs 789 drivers/spi/spi-s3c64xx.c spi->cs_gpio = cs->line; cs 792 drivers/spi/spi-s3c64xx.c if (IS_ERR_OR_NULL(cs)) { cs 809 drivers/spi/spi-s3c64xx.c spi_set_ctldata(spi, cs); cs 868 drivers/spi/spi-s3c64xx.c kfree(cs); cs 875 drivers/spi/spi-s3c64xx.c struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi); cs 880 drivers/spi/spi-s3c64xx.c kfree(cs); cs 1005 drivers/spi/spi-sirf.c void *cs = kmalloc(sizeof(int), GFP_KERNEL); cs 1006 drivers/spi/spi-sirf.c if (!cs) { cs 1021 drivers/spi/spi-sirf.c spi_set_ctldata(spi, cs); cs 291 drivers/spi/spi-sprd.c static void sprd_spi_chipselect(struct spi_device *sdev, bool cs) cs 299 drivers/spi/spi-sprd.c if (!cs) { cs 186 drivers/spi/spi-st-ssc4.c int cs = spi->cs_gpio; cs 194 drivers/spi/spi-st-ssc4.c if (!gpio_is_valid(cs)) { cs 195 drivers/spi/spi-st-ssc4.c dev_err(&spi->dev, "%d is not a valid gpio\n", cs); cs 199 drivers/spi/spi-st-ssc4.c ret = gpio_request(cs, dev_name(&spi->dev)); cs 201 drivers/spi/spi-st-ssc4.c dev_err(&spi->dev, "could not request gpio:%d\n", cs); cs 205 drivers/spi/spi-st-ssc4.c ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH); cs 268 drivers/spi/spi-st-ssc4.c gpio_free(cs); cs 93 drivers/spi/spi-stm32-qspi.c u32 cs; cs 358 drivers/spi/spi-stm32-qspi.c cr |= FIELD_PREP(CR_FSEL, flash->cs); cs 458 drivers/spi/spi-stm32-qspi.c flash->cs = spi->chip_select; cs 38 drivers/spi/spi-sun4i.c #define SUN4I_CTL_CS(cs) (((cs) << 12) & SUN4I_CTL_CS_MASK) cs 37 drivers/spi/spi-sun6i.c #define SUN6I_TFR_CTL_CS(cs) (((cs) << 4) & SUN6I_TFR_CTL_CS_MASK) cs 124 drivers/spi/spi-synquacer.c unsigned int cs; cs 233 drivers/spi/spi-synquacer.c unsigned int speed, mode, bpw, cs, bus_width, transfer_mode; cs 253 drivers/spi/spi-synquacer.c cs = spi->chip_select; cs 260 drivers/spi/spi-synquacer.c mode == sspi->mode && cs == sspi->cs && cs 275 drivers/spi/spi-synquacer.c val = readl(sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs)); cs 321 drivers/spi/spi-synquacer.c writel(val, sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs)); cs 347 drivers/spi/spi-synquacer.c sspi->cs = spi->chip_select; cs 70 drivers/spi/spi-tegra114.c #define SPI_CS_SETUP_HOLD(reg, cs, val) \ cs 71 drivers/spi/spi-tegra114.c ((((val) & 0xFFu) << ((cs) * 8)) | \ cs 72 drivers/spi/spi-tegra114.c ((reg) & ~(0xFFu << ((cs) * 8)))) cs 83 drivers/spi/spi-tegra114.c #define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val) \ cs 84 drivers/spi/spi-tegra114.c (reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \ cs 85 drivers/spi/spi-tegra114.c ((reg) & ~(1 << ((cs) * 8 + 5)))) cs 86 drivers/spi/spi-tegra114.c #define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \ cs 87 
drivers/spi/spi-tegra114.c (reg = (((val) & 0x1F) << ((cs) * 8)) | \ cs 88 drivers/spi/spi-tegra114.c ((reg) & ~(0x1F << ((cs) * 8)))) cs 61 drivers/spi/spi-xcomm.c unsigned long cs = spi->chip_select; cs 65 drivers/spi/spi-xcomm.c chipselect |= BIT(cs); cs 67 drivers/spi/spi-xcomm.c chipselect &= ~BIT(cs); cs 190 drivers/spi/spi-xilinx.c u32 cs; cs 215 drivers/spi/spi-xilinx.c cs = xspi->cs_inactive; cs 216 drivers/spi/spi-xilinx.c cs ^= BIT(spi->chip_select); cs 219 drivers/spi/spi-xilinx.c xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET); cs 99 drivers/spi/spi-xlp.c int cs; /* slave device chip select */ cs 106 drivers/spi/spi-xlp.c int cs, int regoff) cs 108 drivers/spi/spi-xlp.c return readl(priv->base + regoff + cs * SPI_CS_OFFSET); cs 111 drivers/spi/spi-xlp.c static inline void xlp_spi_reg_write(struct xlp_spi_priv *priv, int cs, cs 114 drivers/spi/spi-xlp.c writel(val, priv->base + regoff + cs * SPI_CS_OFFSET); cs 128 drivers/spi/spi-xlp.c int cs; cs 130 drivers/spi/spi-xlp.c for (cs = 0; cs < XLP_SPI_MAX_CS; cs++) cs 132 drivers/spi/spi-xlp.c XLP_SPI_SYS_RESET << cs); cs 140 drivers/spi/spi-xlp.c int cs; cs 143 drivers/spi/spi-xlp.c cs = spi->chip_select; cs 153 drivers/spi/spi-xlp.c xlp_spi_reg_write(xspi, cs, XLP_SPI_FDIV, fdiv); cs 154 drivers/spi/spi-xlp.c xlp_spi_reg_write(xspi, cs, XLP_SPI_FIFO_THRESH, XLP_SPI_TXRXTH); cs 155 drivers/spi/spi-xlp.c cfg = xlp_spi_reg_read(xspi, cs, XLP_SPI_CONFIG); cs 176 drivers/spi/spi-xlp.c xlp_spi_reg_write(xspi, cs, XLP_SPI_CONFIG, cfg); cs 186 drivers/spi/spi-xlp.c rxfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT); cs 189 drivers/spi/spi-xlp.c rx_data = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_RXDATA_FIFO); cs 206 drivers/spi/spi-xlp.c txfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT); cs 216 drivers/spi/spi-xlp.c xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_TXDATA_FIFO, tx_data); cs 228 drivers/spi/spi-xlp.c stat = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_STATUS) & cs 248 drivers/spi/spi-xlp.c xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_STATUS, stat); cs 267 drivers/spi/spi-xlp.c xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_CMD, cmd); cs 301 drivers/spi/spi-xlp.c xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, intr_mask); cs 306 drivers/spi/spi-xlp.c xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, 0x0); cs 354 drivers/spi/spi-xlp.c xspi->cs = spi->chip_select; cs 1943 drivers/spi/spi.c int cs = ctlr->fw_translate_cs(ctlr, cs 1945 drivers/spi/spi.c if (cs < 0) cs 1946 drivers/spi/spi.c return cs; cs 1947 drivers/spi/spi.c lookup->chip_select = cs; cs 2245 drivers/spi/spi.c int nb, i, *cs; cs 2260 drivers/spi/spi.c cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), cs 2262 drivers/spi/spi.c ctlr->cs_gpios = cs; cs 2268 drivers/spi/spi.c cs[i] = -ENOENT; cs 2271 drivers/spi/spi.c cs[i] = of_get_named_gpio(np, "cs-gpios", i); cs 2289 drivers/spi/spi.c struct gpio_desc **cs; cs 2301 drivers/spi/spi.c cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), cs 2303 drivers/spi/spi.c if (!cs) cs 2305 drivers/spi/spi.c ctlr->cs_gpiods = cs; cs 2315 drivers/spi/spi.c cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, cs 2317 drivers/spi/spi.c if (IS_ERR(cs[i])) cs 2318 drivers/spi/spi.c return PTR_ERR(cs[i]); cs 2320 drivers/spi/spi.c if (cs[i]) { cs 2331 drivers/spi/spi.c gpiod_set_consumer_name(cs[i], gpioname); cs 27 drivers/staging/fbtft/fb_bd663474.c if (par->gpio.cs) cs 28 drivers/staging/fbtft/fb_bd663474.c gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ cs 80 drivers/staging/fbtft/fb_ili9163.c 
if (par->gpio.cs) cs 81 drivers/staging/fbtft/fb_ili9163.c gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ cs 88 drivers/staging/fbtft/fb_ili9325.c if (par->gpio.cs) cs 89 drivers/staging/fbtft/fb_ili9325.c gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ cs 32 drivers/staging/fbtft/fb_s6d1121.c if (par->gpio.cs) cs 33 drivers/staging/fbtft/fb_s6d1121.c gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ cs 31 drivers/staging/fbtft/fb_ssd1289.c if (par->gpio.cs) cs 32 drivers/staging/fbtft/fb_ssd1289.c gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ cs 38 drivers/staging/fbtft/fb_ssd1325.c gpiod_set_value(par->gpio.cs, 0); cs 29 drivers/staging/fbtft/fb_upd161704.c if (par->gpio.cs) cs 30 drivers/staging/fbtft/fb_upd161704.c gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ cs 115 drivers/staging/fbtft/fbtft-core.c ret = fbtft_request_one_gpio(par, "cs", 0, &par->gpio.cs); cs 924 drivers/staging/fbtft/fbtft-core.c if (par->gpio.cs) cs 925 drivers/staging/fbtft/fbtft-core.c gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ cs 1015 drivers/staging/fbtft/fbtft-core.c if (par->gpio.cs) cs 1016 drivers/staging/fbtft/fbtft-core.c gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ cs 214 drivers/staging/fbtft/fbtft.h struct gpio_desc *cs; cs 437 drivers/staging/greybus/spilib.c static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs) cs 447 drivers/staging/greybus/spilib.c request.chip_select = cs; cs 471 drivers/staging/greybus/spilib.c spi_board.chip_select = cs; cs 49 drivers/staging/isdn/gigaset/asyncdata.c struct cardstate *cs = inbuf->cs; cs 50 drivers/staging/isdn/gigaset/asyncdata.c unsigned cbytes = cs->cbytes; cs 60 drivers/staging/isdn/gigaset/asyncdata.c if (cbytes == 0 && cs->respdata[0] == '\r') { cs 62 drivers/staging/isdn/gigaset/asyncdata.c cs->respdata[0] = 0; cs 69 drivers/staging/isdn/gigaset/asyncdata.c dev_warn(cs->dev, "response too large (%d)\n", cs 73 drivers/staging/isdn/gigaset/asyncdata.c cs->cbytes = cbytes; cs 75 drivers/staging/isdn/gigaset/asyncdata.c cbytes, cs->respdata); cs 76 drivers/staging/isdn/gigaset/asyncdata.c gigaset_handle_modem_response(cs); cs 80 drivers/staging/isdn/gigaset/asyncdata.c cs->respdata[0] = c; cs 83 drivers/staging/isdn/gigaset/asyncdata.c if (cs->dle && !(inbuf->inputstate & INS_DLE_command)) cs 93 drivers/staging/isdn/gigaset/asyncdata.c } else if (cs->dle || cs 104 drivers/staging/isdn/gigaset/asyncdata.c cs->respdata[cbytes] = c; cs 109 drivers/staging/isdn/gigaset/asyncdata.c cs->cbytes = cbytes; cs 123 drivers/staging/isdn/gigaset/asyncdata.c gigaset_if_receive(inbuf->cs, src, numbytes); cs 137 drivers/staging/isdn/gigaset/asyncdata.c struct cardstate *cs = inbuf->cs; cs 138 drivers/staging/isdn/gigaset/asyncdata.c struct bc_state *bcs = cs->bcs; cs 160 drivers/staging/isdn/gigaset/asyncdata.c } else if (cs->dle || (inputstate & INS_DLE_command)) { cs 181 drivers/staging/isdn/gigaset/asyncdata.c } else if (cs->dle || cs 206 drivers/staging/isdn/gigaset/asyncdata.c dev_warn(cs->dev, cs 213 drivers/staging/isdn/gigaset/asyncdata.c dev_err(cs->dev, cs 259 drivers/staging/isdn/gigaset/asyncdata.c dev_warn(cs->dev, "received packet too long\n"); cs 284 drivers/staging/isdn/gigaset/asyncdata.c struct cardstate *cs = inbuf->cs; cs 285 drivers/staging/isdn/gigaset/asyncdata.c struct bc_state *bcs = cs->bcs; cs 306 drivers/staging/isdn/gigaset/asyncdata.c } else if (cs->dle || (inputstate & INS_DLE_command)) { cs 337 drivers/staging/isdn/gigaset/asyncdata.c struct cardstate *cs = inbuf->cs; cs 339 
drivers/staging/isdn/gigaset/asyncdata.c if (cs->mstate == MS_LOCKED) cs 345 drivers/staging/isdn/gigaset/asyncdata.c (cs->dle || inbuf->inputstate & INS_DLE_command)) { cs 366 drivers/staging/isdn/gigaset/asyncdata.c dev_notice(cs->dev, cs 373 drivers/staging/isdn/gigaset/asyncdata.c dev_notice(cs->dev, cs 377 drivers/staging/isdn/gigaset/asyncdata.c if (cs->dle) cs 384 drivers/staging/isdn/gigaset/asyncdata.c if (!(cs->dle || inbuf->inputstate & INS_DLE_command)) cs 385 drivers/staging/isdn/gigaset/asyncdata.c dev_notice(cs->dev, cs 389 drivers/staging/isdn/gigaset/asyncdata.c dev_notice(cs->dev, "received <DLE><%02x>\n", cs 406 drivers/staging/isdn/gigaset/asyncdata.c struct cardstate *cs = inbuf->cs; cs 424 drivers/staging/isdn/gigaset/asyncdata.c if (cs->mstate == MS_LOCKED) cs 428 drivers/staging/isdn/gigaset/asyncdata.c else if (cs->bcs->proto2 == L2_HDLC) cs 584 drivers/staging/isdn/gigaset/asyncdata.c struct cardstate *cs = bcs->cs; cs 593 drivers/staging/isdn/gigaset/asyncdata.c dev_err(cs->dev, cs 599 drivers/staging/isdn/gigaset/asyncdata.c spin_lock_irqsave(&cs->lock, flags); cs 600 drivers/staging/isdn/gigaset/asyncdata.c if (cs->connected) cs 601 drivers/staging/isdn/gigaset/asyncdata.c tasklet_schedule(&cs->write_tasklet); cs 602 drivers/staging/isdn/gigaset/asyncdata.c spin_unlock_irqrestore(&cs->lock, flags); cs 89 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs; cs 304 drivers/staging/isdn/gigaset/bas-gigaset.c static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, cs 310 drivers/staging/isdn/gigaset/bas-gigaset.c static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) cs 315 drivers/staging/isdn/gigaset/bas-gigaset.c static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) cs 345 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = bcs->cs; cs 347 drivers/staging/isdn/gigaset/bas-gigaset.c gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL); cs 348 drivers/staging/isdn/gigaset/bas-gigaset.c gigaset_schedule_event(cs); cs 359 drivers/staging/isdn/gigaset/bas-gigaset.c static inline void error_reset(struct cardstate *cs) cs 362 drivers/staging/isdn/gigaset/bas-gigaset.c update_basstate(cs->hw.bas, BS_RESETTING, 0); cs 363 drivers/staging/isdn/gigaset/bas-gigaset.c if (req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT)) cs 365 drivers/staging/isdn/gigaset/bas-gigaset.c usb_queue_reset_device(cs->hw.bas->interface); cs 437 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = ucs->cs; cs 446 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, cs 452 drivers/staging/isdn/gigaset/bas-gigaset.c error_reset(cs); cs 458 drivers/staging/isdn/gigaset/bas-gigaset.c rc = atread_submit(cs, BAS_TIMEOUT); cs 464 drivers/staging/isdn/gigaset/bas-gigaset.c error_reset(cs); cs 478 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = inbuf->cs; cs 479 drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = cs->hw.bas; cs 492 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 502 drivers/staging/isdn/gigaset/bas-gigaset.c gigaset_schedule_event(cs); cs 520 drivers/staging/isdn/gigaset/bas-gigaset.c rc = atread_submit(cs, BAS_TIMEOUT); cs 528 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "control read: %s, giving up after %d tries\n", cs 530 drivers/staging/isdn/gigaset/bas-gigaset.c error_reset(cs); cs 549 drivers/staging/isdn/gigaset/bas-gigaset.c static int atread_submit(struct cardstate *cs, int timeout) cs 551 
drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = cs->hw.bas; cs 560 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, cs 566 drivers/staging/isdn/gigaset/bas-gigaset.c dev_notice(cs->dev, cs 583 drivers/staging/isdn/gigaset/bas-gigaset.c read_ctrl_callback, cs->inbuf); cs 588 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n", cs 609 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = urb->context; cs 625 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc)); cs 643 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = ucs->cs; cs 647 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "interrupt read: giving up after %d tries\n", cs 656 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "could not resubmit interrupt URB: %s\n", cs 671 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = urb->context; cs 672 drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = cs->hw.bas; cs 705 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, "interrupt read: %s\n", cs 712 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, "incomplete interrupt packet (%d bytes)\n", cs 734 drivers/staging/isdn/gigaset/bas-gigaset.c start_cbsend(cs); cs 741 drivers/staging/isdn/gigaset/bas-gigaset.c bcs = cs->bcs + channel; cs 748 drivers/staging/isdn/gigaset/bas-gigaset.c start_cbsend(cs); cs 755 drivers/staging/isdn/gigaset/bas-gigaset.c bcs = cs->bcs + channel; cs 769 drivers/staging/isdn/gigaset/bas-gigaset.c bcs = cs->bcs + channel; cs 780 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 784 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->lock, flags); cs 786 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 787 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 794 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, cs 802 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 803 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "out of memory receiving AT data\n"); cs 808 drivers/staging/isdn/gigaset/bas-gigaset.c rc = atread_submit(cs, BAS_TIMEOUT); cs 814 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 816 drivers/staging/isdn/gigaset/bas-gigaset.c error_reset(cs); cs 821 drivers/staging/isdn/gigaset/bas-gigaset.c dev_notice(cs->dev, "interrupt pipe reset\n"); cs 829 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 840 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "could not resubmit interrupt URB: %s\n", cs 842 drivers/staging/isdn/gigaset/bas-gigaset.c error_reset(cs); cs 895 drivers/staging/isdn/gigaset/bas-gigaset.c urb->dev = bcs->cs->hw.bas->udev; cs 900 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(bcs->cs->dev, cs 958 drivers/staging/isdn/gigaset/bas-gigaset.c struct usb_device *udev = bcs->cs->hw.bas->udev; cs 1088 drivers/staging/isdn/gigaset/bas-gigaset.c urb->dev = ucx->bcs->cs->hw.bas->udev; cs 1120 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(ucx->bcs->cs->dev, cs 1142 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(ucx->bcs->cs->dev, cs 1161 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = bcs->cs; cs 1187 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "isoc write underrun\n"); cs 1211 drivers/staging/isdn/gigaset/bas-gigaset.c 
dev_err(cs->dev, cs 1237 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 1247 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "isoc write: stalled\n"); cs 1251 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, "isoc write: %s\n", cs 1304 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = bcs->cs; cs 1324 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 1355 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "isoc read: stalled\n"); cs 1359 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, "isoc read: %s\n", cs 1383 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 1389 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 1394 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 1402 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 1412 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, "isoc read: %d data bytes missing\n", cs 1422 drivers/staging/isdn/gigaset/bas-gigaset.c urb->dev = bcs->cs->hw.bas->udev; cs 1427 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, cs 1447 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = ucs->cs; cs 1464 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "timeout opening AT channel\n"); cs 1465 drivers/staging/isdn/gigaset/bas-gigaset.c error_reset(cs); cs 1469 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "timeout opening channel 1\n"); cs 1470 drivers/staging/isdn/gigaset/bas-gigaset.c error_hangup(&cs->bcs[0]); cs 1474 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "timeout opening channel 2\n"); cs 1475 drivers/staging/isdn/gigaset/bas-gigaset.c error_hangup(&cs->bcs[1]); cs 1479 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "timeout closing AT channel\n"); cs 1480 drivers/staging/isdn/gigaset/bas-gigaset.c error_reset(cs); cs 1484 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "timeout closing channel 1\n"); cs 1485 drivers/staging/isdn/gigaset/bas-gigaset.c error_reset(cs); cs 1489 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "timeout closing channel 2\n"); cs 1490 drivers/staging/isdn/gigaset/bas-gigaset.c error_reset(cs); cs 1495 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, cs 1501 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, "request 0x%02x timed out, clearing\n", cs 1595 drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = bcs->cs->hw.bas; cs 1604 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(bcs->cs->dev, cs 1623 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n", cs 1649 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = bcs->cs; cs 1653 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->lock, flags); cs 1654 drivers/staging/isdn/gigaset/bas-gigaset.c if (unlikely(!cs->connected)) { cs 1656 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 1660 drivers/staging/isdn/gigaset/bas-gigaset.c if (cs->hw.bas->basstate & BS_SUSPEND) { cs 1661 drivers/staging/isdn/gigaset/bas-gigaset.c dev_notice(cs->dev, cs 1663 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 1669 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 1670 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, cs 1682 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "could not 
open channel B%d\n", cs 1687 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 1705 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = bcs->cs; cs 1709 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->lock, flags); cs 1710 drivers/staging/isdn/gigaset/bas-gigaset.c if (unlikely(!cs->connected)) { cs 1711 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 1716 drivers/staging/isdn/gigaset/bas-gigaset.c if (!(cs->hw.bas->basstate & (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) { cs 1718 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 1727 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "closing channel B%d failed\n", cs 1730 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 1743 drivers/staging/isdn/gigaset/bas-gigaset.c static void complete_cb(struct cardstate *cs) cs 1745 drivers/staging/isdn/gigaset/bas-gigaset.c struct cmdbuf_t *cb = cs->cmdbuf; cs 1748 drivers/staging/isdn/gigaset/bas-gigaset.c cs->cmdbytes -= cs->curlen; cs 1750 drivers/staging/isdn/gigaset/bas-gigaset.c cs->curlen, cs->cmdbytes); cs 1752 drivers/staging/isdn/gigaset/bas-gigaset.c cs->cmdbuf = cb->next; cs 1753 drivers/staging/isdn/gigaset/bas-gigaset.c cs->cmdbuf->prev = NULL; cs 1754 drivers/staging/isdn/gigaset/bas-gigaset.c cs->curlen = cs->cmdbuf->len; cs 1756 drivers/staging/isdn/gigaset/bas-gigaset.c cs->cmdbuf = NULL; cs 1757 drivers/staging/isdn/gigaset/bas-gigaset.c cs->lastcmdbuf = NULL; cs 1758 drivers/staging/isdn/gigaset/bas-gigaset.c cs->curlen = 0; cs 1776 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = urb->context; cs 1777 drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = cs->hw.bas; cs 1799 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 1807 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 1813 drivers/staging/isdn/gigaset/bas-gigaset.c if (cs->cmdbuf == NULL) { cs 1814 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, cs 1820 drivers/staging/isdn/gigaset/bas-gigaset.c dev_notice(cs->dev, "command write: %s, retry %d\n", cs 1822 drivers/staging/isdn/gigaset/bas-gigaset.c if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0) cs 1829 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags); cs 1830 drivers/staging/isdn/gigaset/bas-gigaset.c if (cs->cmdbuf != NULL) cs 1831 drivers/staging/isdn/gigaset/bas-gigaset.c complete_cb(cs); cs 1832 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags); cs 1843 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = ucs->cs; cs 1845 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, "timeout waiting for HD_READY_SEND_ATDATA\n"); cs 1849 drivers/staging/isdn/gigaset/bas-gigaset.c start_cbsend(cs); cs 1863 drivers/staging/isdn/gigaset/bas-gigaset.c static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len) cs 1865 drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = cs->hw.bas; cs 1871 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, cs 1884 drivers/staging/isdn/gigaset/bas-gigaset.c write_command_callback, cs); cs 1888 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "could not submit HD_WRITE_ATMESSAGE: %s\n", cs 1910 drivers/staging/isdn/gigaset/bas-gigaset.c static int start_cbsend(struct cardstate *cs) cs 1913 
drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = cs->hw.bas; cs 1927 drivers/staging/isdn/gigaset/bas-gigaset.c rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT); cs 1930 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags); cs 1931 drivers/staging/isdn/gigaset/bas-gigaset.c while (cs->cmdbuf != NULL) cs 1932 drivers/staging/isdn/gigaset/bas-gigaset.c complete_cb(cs); cs 1933 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags); cs 1939 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags); cs 1941 drivers/staging/isdn/gigaset/bas-gigaset.c while ((cb = cs->cmdbuf) != NULL && (ucs->basstate & BS_ATREADY)) { cs 1943 drivers/staging/isdn/gigaset/bas-gigaset.c rc = atwrite_submit(cs, cb->buf, cb->len); cs 1946 drivers/staging/isdn/gigaset/bas-gigaset.c complete_cb(cs); cs 1950 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags); cs 1968 drivers/staging/isdn/gigaset/bas-gigaset.c static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) cs 1973 drivers/staging/isdn/gigaset/bas-gigaset.c gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? cs 1994 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->lock, flags); cs 1995 drivers/staging/isdn/gigaset/bas-gigaset.c if (!(cs->hw.bas->basstate & BS_ATRDPEND)) { cs 1996 drivers/staging/isdn/gigaset/bas-gigaset.c kfree(cs->hw.bas->rcvbuf); cs 1997 drivers/staging/isdn/gigaset/bas-gigaset.c cs->hw.bas->rcvbuf = NULL; cs 1998 drivers/staging/isdn/gigaset/bas-gigaset.c cs->hw.bas->rcvbuf_size = 0; cs 1999 drivers/staging/isdn/gigaset/bas-gigaset.c cs->hw.bas->retry_cmd_in = 0; cs 2000 drivers/staging/isdn/gigaset/bas-gigaset.c atread_submit(cs, 0); cs 2002 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 2004 drivers/staging/isdn/gigaset/bas-gigaset.c rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); cs 2013 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags); cs 2014 drivers/staging/isdn/gigaset/bas-gigaset.c cb->prev = cs->lastcmdbuf; cs 2015 drivers/staging/isdn/gigaset/bas-gigaset.c if (cs->lastcmdbuf) cs 2016 drivers/staging/isdn/gigaset/bas-gigaset.c cs->lastcmdbuf->next = cb; cs 2018 drivers/staging/isdn/gigaset/bas-gigaset.c cs->cmdbuf = cb; cs 2019 drivers/staging/isdn/gigaset/bas-gigaset.c cs->curlen = cb->len; cs 2021 drivers/staging/isdn/gigaset/bas-gigaset.c cs->cmdbytes += cb->len; cs 2022 drivers/staging/isdn/gigaset/bas-gigaset.c cs->lastcmdbuf = cb; cs 2023 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags); cs 2025 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->lock, flags); cs 2026 drivers/staging/isdn/gigaset/bas-gigaset.c if (unlikely(!cs->connected)) { cs 2027 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 2030 drivers/staging/isdn/gigaset/bas-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags); cs 2031 drivers/staging/isdn/gigaset/bas-gigaset.c while (cs->cmdbuf != NULL) cs 2032 drivers/staging/isdn/gigaset/bas-gigaset.c complete_cb(cs); cs 2033 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags); cs 2036 drivers/staging/isdn/gigaset/bas-gigaset.c rc = start_cbsend(cs); cs 2037 drivers/staging/isdn/gigaset/bas-gigaset.c spin_unlock_irqrestore(&cs->lock, flags); cs 2050 drivers/staging/isdn/gigaset/bas-gigaset.c static int gigaset_write_room(struct 
cardstate *cs) cs 2063 drivers/staging/isdn/gigaset/bas-gigaset.c static int gigaset_chars_in_buffer(struct cardstate *cs) cs 2065 drivers/staging/isdn/gigaset/bas-gigaset.c return cs->cmdbytes; cs 2075 drivers/staging/isdn/gigaset/bas-gigaset.c static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) cs 2185 drivers/staging/isdn/gigaset/bas-gigaset.c static void gigaset_freecshw(struct cardstate *cs) cs 2188 drivers/staging/isdn/gigaset/bas-gigaset.c kfree(cs->hw.bas->int_in_buf); cs 2189 drivers/staging/isdn/gigaset/bas-gigaset.c kfree(cs->hw.bas); cs 2190 drivers/staging/isdn/gigaset/bas-gigaset.c cs->hw.bas = NULL; cs 2199 drivers/staging/isdn/gigaset/bas-gigaset.c static int gigaset_initcshw(struct cardstate *cs) cs 2203 drivers/staging/isdn/gigaset/bas-gigaset.c cs->hw.bas = ucs = kzalloc(sizeof(*ucs), GFP_KERNEL); cs 2216 drivers/staging/isdn/gigaset/bas-gigaset.c ucs->cs = cs; cs 2233 drivers/staging/isdn/gigaset/bas-gigaset.c static void freeurbs(struct cardstate *cs) cs 2235 drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = cs->hw.bas; cs 2241 drivers/staging/isdn/gigaset/bas-gigaset.c ubc = cs->bcs[j].hw.bas; cs 2276 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = NULL; cs 2322 drivers/staging/isdn/gigaset/bas-gigaset.c cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode, cs 2324 drivers/staging/isdn/gigaset/bas-gigaset.c if (!cs) cs 2326 drivers/staging/isdn/gigaset/bas-gigaset.c ucs = cs->hw.bas; cs 2332 drivers/staging/isdn/gigaset/bas-gigaset.c cs->dev = &interface->dev; cs 2346 drivers/staging/isdn/gigaset/bas-gigaset.c ubc = cs->bcs[j].hw.bas; cs 2365 drivers/staging/isdn/gigaset/bas-gigaset.c ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs, cs 2369 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "could not submit interrupt URB: %s\n", cs 2376 drivers/staging/isdn/gigaset/bas-gigaset.c rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0); cs 2382 drivers/staging/isdn/gigaset/bas-gigaset.c cs->mstate = MS_LOCKED; cs 2385 drivers/staging/isdn/gigaset/bas-gigaset.c usb_set_intfdata(interface, cs); cs 2387 drivers/staging/isdn/gigaset/bas-gigaset.c rc = gigaset_start(cs); cs 2394 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "could not allocate URBs\n"); cs 2397 drivers/staging/isdn/gigaset/bas-gigaset.c freeurbs(cs); cs 2400 drivers/staging/isdn/gigaset/bas-gigaset.c gigaset_freecs(cs); cs 2409 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs; cs 2413 drivers/staging/isdn/gigaset/bas-gigaset.c cs = usb_get_intfdata(interface); cs 2415 drivers/staging/isdn/gigaset/bas-gigaset.c ucs = cs->hw.bas; cs 2417 drivers/staging/isdn/gigaset/bas-gigaset.c dev_info(cs->dev, "disconnecting Gigaset base\n"); cs 2424 drivers/staging/isdn/gigaset/bas-gigaset.c gigaset_bchannel_down(cs->bcs + j); cs 2427 drivers/staging/isdn/gigaset/bas-gigaset.c gigaset_stop(cs); cs 2435 drivers/staging/isdn/gigaset/bas-gigaset.c freeurbs(cs); cs 2443 drivers/staging/isdn/gigaset/bas-gigaset.c cs->dev = NULL; cs 2444 drivers/staging/isdn/gigaset/bas-gigaset.c gigaset_freecs(cs); cs 2454 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = usb_get_intfdata(intf); cs 2455 drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = cs->hw.bas; cs 2473 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, "cannot suspend:\n"); cs 2475 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, " B channel 1 open\n"); cs 2477 drivers/staging/isdn/gigaset/bas-gigaset.c 
dev_warn(cs->dev, " B channel 2 open\n"); cs 2479 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, " receiving AT reply\n"); cs 2481 drivers/staging/isdn/gigaset/bas-gigaset.c dev_warn(cs->dev, " sending AT command\n"); cs 2489 drivers/staging/isdn/gigaset/bas-gigaset.c rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, 0); cs 2522 drivers/staging/isdn/gigaset/bas-gigaset.c struct cardstate *cs = usb_get_intfdata(intf); cs 2523 drivers/staging/isdn/gigaset/bas-gigaset.c struct bas_cardstate *ucs = cs->hw.bas; cs 2529 drivers/staging/isdn/gigaset/bas-gigaset.c dev_err(cs->dev, "could not resubmit interrupt URB: %s\n", cs 2627 drivers/staging/isdn/gigaset/bas-gigaset.c if (gigaset_shutdown(driver->cs + i) < 0) cs 2632 drivers/staging/isdn/gigaset/bas-gigaset.c ucs = driver->cs[i].hw.bas; cs 148 drivers/staging/isdn/gigaset/capi.c static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param, cs 152 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n", cs 323 drivers/staging/isdn/gigaset/capi.c static void send_data_b3_conf(struct cardstate *cs, struct capi_ctr *ctr, cs 332 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 369 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = bcs->cs; cs 370 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 395 drivers/staging/isdn/gigaset/capi.c send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req), cs 414 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = bcs->cs; cs 415 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 494 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = at_state->cs; cs 496 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 525 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "RING ignored - bad BC %s\n", cs 553 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "RING ignored - bad HLC %s\n", cs 578 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "RING ignored - bad number %s\n", cs 593 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "RING ignored - bad number %s\n", cs 628 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n", cs 643 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", cs 649 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", cs 683 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = bcs->cs; cs 684 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 696 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 701 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 717 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = bcs->cs; cs 718 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 731 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 736 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 753 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = bcs->cs; cs 754 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 769 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "%s: application %u not connected\n", cs 776 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "%s: dropping extra application %u\n", cs 801 
drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 805 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 852 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = bcs->cs; cs 853 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 869 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "%s: application %u not connected\n", cs 893 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "%s: dropping extra application %u\n", cs 905 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 909 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 945 drivers/staging/isdn/gigaset/capi.c void gigaset_isdn_start(struct cardstate *cs) cs 947 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 955 drivers/staging/isdn/gigaset/capi.c iif->ctr.version.majormanuversion = cs->fwver[0]; cs 956 drivers/staging/isdn/gigaset/capi.c iif->ctr.version.minormanuversion = cs->fwver[1]; cs 958 drivers/staging/isdn/gigaset/capi.c iif->ctr.profile.nbchannel = cs->channels; cs 980 drivers/staging/isdn/gigaset/capi.c void gigaset_isdn_stop(struct cardstate *cs) cs 982 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 999 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = ctr->driverdata; cs 1007 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, cs 1014 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 1021 drivers/staging/isdn/gigaset/capi.c dev_info(cs->dev, "application %u registered\n", ap->id); cs 1032 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = bcs->cs; cs 1058 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: hanging up channel %u\n", cs 1060 drivers/staging/isdn/gigaset/capi.c gigaset_add_event(cs, &bcs->at_state, cs 1062 drivers/staging/isdn/gigaset/capi.c gigaset_schedule_event(cs); cs 1086 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 1095 drivers/staging/isdn/gigaset/capi.c for (ch = 0; ch < cs->channels; ch++) cs 1096 drivers/staging/isdn/gigaset/capi.c remove_appl_from_channel(&cs->bcs[ch], ap); cs 1101 drivers/staging/isdn/gigaset/capi.c dev_info(cs->dev, "application %u released\n", appl); cs 1119 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 1128 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1144 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 1154 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1183 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ", cs 1201 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: %s missing\n", cs 1208 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, cs 1225 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, cs 1263 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 1267 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1284 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 1288 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1308 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = 
iif->ctr.driverdata; cs 1312 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1329 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 1341 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1348 drivers/staging/isdn/gigaset/capi.c bcs = gigaset_get_free_channel(cs); cs 1350 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: no B channel available\n", cs 1357 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n", cs 1377 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: %s missing\n", cs 1389 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n", cs 1422 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, cs 1431 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: %s IE truncated\n", cs 1444 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", cs 1468 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: unknown CIP value %d\n", cs 1522 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: cannot set HLC without BC\n", cs 1532 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, cs 1543 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, cs 1549 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, cs 1553 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, cs 1556 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->B1configuration, cs 1558 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->B2configuration, cs 1560 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->B3configuration, cs 1569 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->CalledPartySubaddress, cs 1571 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->CallingPartySubaddress, cs 1573 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->LLC, cs 1576 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->BChannelinformation, cs 1578 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->Keypadfacility, cs 1580 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->Useruserdata, cs 1582 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->Facilitydataarray, cs 1594 drivers/staging/isdn/gigaset/capi.c if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands, cs 1599 drivers/staging/isdn/gigaset/capi.c gigaset_schedule_event(cs); cs 1604 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 1623 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 1632 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1641 drivers/staging/isdn/gigaset/capi.c if (!channel || channel > cs->channels) { cs 1642 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", cs 1646 drivers/staging/isdn/gigaset/capi.c bcs = cs->bcs + channel - 1; cs 1674 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, cs 1685 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, cs 1691 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, cs 1695 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, cs 1698 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->B1configuration, cs 1700 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->B2configuration, cs 1702 
drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->B3configuration, cs 1707 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->ConnectedNumber, cs 1709 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->ConnectedSubaddress, cs 1711 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->LLC, cs 1714 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->BChannelinformation, cs 1716 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->Keypadfacility, cs 1718 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->Useruserdata, cs 1720 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->Facilitydataarray, cs 1725 drivers/staging/isdn/gigaset/capi.c if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state, cs 1728 drivers/staging/isdn/gigaset/capi.c gigaset_schedule_event(cs); cs 1755 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: application %u not found\n", cs 1777 drivers/staging/isdn/gigaset/capi.c dev_info(cs->dev, "%s: Reject=%x\n", cs 1779 drivers/staging/isdn/gigaset/capi.c if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state, cs 1782 drivers/staging/isdn/gigaset/capi.c gigaset_schedule_event(cs); cs 1795 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 1802 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1810 drivers/staging/isdn/gigaset/capi.c if (!channel || channel > cs->channels) { cs 1811 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", cs 1816 drivers/staging/isdn/gigaset/capi.c bcs = &cs->bcs[channel - 1]; cs 1825 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI"); cs 1842 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 1851 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1859 drivers/staging/isdn/gigaset/capi.c if (!channel || channel > cs->channels || cs 1861 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", cs 1866 drivers/staging/isdn/gigaset/capi.c bcs = &cs->bcs[channel - 1]; cs 1873 drivers/staging/isdn/gigaset/capi.c if (!gigaset_add_event(cs, &bcs->at_state, cs 1878 drivers/staging/isdn/gigaset/capi.c gigaset_schedule_event(cs); cs 1895 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1912 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 1921 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 1929 drivers/staging/isdn/gigaset/capi.c if (!channel || channel > cs->channels) { cs 1930 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", cs 1935 drivers/staging/isdn/gigaset/capi.c bcs = cs->bcs + channel - 1; cs 1939 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->BChannelinformation, cs 1941 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->Keypadfacility, cs 1943 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->Useruserdata, cs 1945 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->Facilitydataarray, cs 1965 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 1975 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: out of memory\n", __func__); cs 1982 drivers/staging/isdn/gigaset/capi.c 
dev_err(cs->dev, "%s: message parser failure\n", cs 1994 drivers/staging/isdn/gigaset/capi.c if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) { cs 1998 drivers/staging/isdn/gigaset/capi.c gigaset_schedule_event(cs); cs 2012 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 2019 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 2027 drivers/staging/isdn/gigaset/capi.c if (!channel || channel > cs->channels || cs 2029 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", cs 2034 drivers/staging/isdn/gigaset/capi.c bcs = &cs->bcs[channel - 1]; cs 2044 drivers/staging/isdn/gigaset/capi.c if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) { cs 2048 drivers/staging/isdn/gigaset/capi.c gigaset_schedule_event(cs); cs 2051 drivers/staging/isdn/gigaset/capi.c ignore_cstruct_param(cs, cmsg->NCPI, cs 2065 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 2079 drivers/staging/isdn/gigaset/capi.c if (channel == 0 || channel > cs->channels || ncci != 1) { cs 2080 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", cs 2085 drivers/staging/isdn/gigaset/capi.c bcs = &cs->bcs[channel - 1]; cs 2087 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: unexpected length %d\n", cs 2090 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d)\n", cs 2098 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: reserved flags set (%x)\n", cs 2116 drivers/staging/isdn/gigaset/capi.c if (cs->ops->send_skb(bcs, skb) < 0) { cs 2126 drivers/staging/isdn/gigaset/capi.c send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle, cs 2139 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 2143 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 2159 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 2163 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 2178 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = iif->ctr.driverdata; cs 2182 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: message parser failure\n", __func__); cs 2269 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = ctr->driverdata; cs 2275 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "%s: skb_linearize failed\n", __func__); cs 2282 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: application %u not registered\n", cs 2292 drivers/staging/isdn/gigaset/capi.c dev_notice(cs->dev, "%s: unsupported message %u\n", cs 2312 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: send queue empty\n", __func__); cs 2318 drivers/staging/isdn/gigaset/capi.c dev_warn(cs->dev, "%s: application %u vanished\n", cs 2325 drivers/staging/isdn/gigaset/capi.c dev_err(cs->dev, "%s: handler %x vanished\n", cs 2349 drivers/staging/isdn/gigaset/capi.c struct cardstate *cs = ctr->driverdata; cs 2355 drivers/staging/isdn/gigaset/capi.c dev_driver_string(cs->dev), dev_name(cs->dev)); cs 2356 drivers/staging/isdn/gigaset/capi.c seq_printf(m, "%-16s %d\n", "id", cs->myid); cs 2357 drivers/staging/isdn/gigaset/capi.c if (cs->gotfwver) cs 2359 drivers/staging/isdn/gigaset/capi.c cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]); cs 2360 drivers/staging/isdn/gigaset/capi.c seq_printf(m, "%-16s %d\n", "channels", cs->channels); 
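Aside on the command queue visible in the bas-gigaset.c fragments above (complete_cb() around cs 1743-1758, gigaset_write_cmd() around cs 2013-2022, gigaset_chars_in_buffer() at cs 2063-2065): the driver keeps pending AT commands in a doubly linked FIFO of cmdbuf_t nodes reached through cs->cmdbuf and cs->lastcmdbuf, with cs->cmdbytes tracking the queued byte total, all manipulated under cs->cmdlock. The following is a minimal userspace sketch of that pattern, not the kernel code itself; the struct and function names are hypothetical stand-ins and the spinlock is omitted.

#include <stdlib.h>

/* Hypothetical stand-in for struct cmdbuf_t: one queued AT command. */
struct cmdbuf {
	struct cmdbuf *next, *prev;
	size_t len;
	unsigned char buf[];
};

/* head/tail mirror cs->cmdbuf / cs->lastcmdbuf; bytes mirrors
 * cs->cmdbytes.  The driver touches these only under cs->cmdlock. */
struct cmdqueue {
	struct cmdbuf *head, *tail;
	size_t bytes;
};

/* Append a buffer at the tail, as gigaset_write_cmd() does. */
static void cmd_enqueue(struct cmdqueue *q, struct cmdbuf *cb)
{
	cb->next = NULL;
	cb->prev = q->tail;
	if (q->tail)
		q->tail->next = cb;
	else
		q->head = cb;
	q->tail = cb;
	q->bytes += cb->len;
}

/* Detach and free the head once its transfer has completed, as
 * complete_cb() does. */
static void cmd_complete(struct cmdqueue *q)
{
	struct cmdbuf *cb = q->head;

	if (!cb)
		return;
	q->bytes -= cb->len;
	q->head = cb->next;
	if (q->head)
		q->head->prev = NULL;
	else
		q->tail = NULL;
	free(cb);
}

Keeping an explicit tail pointer makes enqueue O(1), and the running byte count is exactly what the gigaset_chars_in_buffer() fragment above returns to the caller.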
cs 2361 drivers/staging/isdn/gigaset/capi.c seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no"); cs 2363 drivers/staging/isdn/gigaset/capi.c switch (cs->mode) { cs 2381 drivers/staging/isdn/gigaset/capi.c switch (cs->mstate) { cs 2405 drivers/staging/isdn/gigaset/capi.c seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no"); cs 2406 drivers/staging/isdn/gigaset/capi.c seq_printf(m, "%-16s %s\n", "connected", cs->connected ? "yes" : "no"); cs 2407 drivers/staging/isdn/gigaset/capi.c seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no"); cs 2408 drivers/staging/isdn/gigaset/capi.c seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no"); cs 2410 drivers/staging/isdn/gigaset/capi.c for (i = 0; i < cs->channels; i++) { cs 2412 drivers/staging/isdn/gigaset/capi.c cs->bcs[i].corrupted); cs 2414 drivers/staging/isdn/gigaset/capi.c cs->bcs[i].trans_down); cs 2416 drivers/staging/isdn/gigaset/capi.c cs->bcs[i].trans_up); cs 2418 drivers/staging/isdn/gigaset/capi.c cs->bcs[i].chstate); cs 2419 drivers/staging/isdn/gigaset/capi.c switch (cs->bcs[i].proto2) { cs 2444 drivers/staging/isdn/gigaset/capi.c int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) cs 2457 drivers/staging/isdn/gigaset/capi.c iif->ctr.driverdata = cs; cs 2479 drivers/staging/isdn/gigaset/capi.c cs->iif = iif; cs 2480 drivers/staging/isdn/gigaset/capi.c cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN; cs 2488 drivers/staging/isdn/gigaset/capi.c void gigaset_isdn_unregdev(struct cardstate *cs) cs 2490 drivers/staging/isdn/gigaset/capi.c struct gigaset_capi_ctr *iif = cs->iif; cs 2494 drivers/staging/isdn/gigaset/capi.c cs->iif = NULL; cs 86 drivers/staging/isdn/gigaset/common.c static int setflags(struct cardstate *cs, unsigned flags, unsigned delay) cs 90 drivers/staging/isdn/gigaset/common.c r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags); cs 91 drivers/staging/isdn/gigaset/common.c cs->control_state = flags; cs 103 drivers/staging/isdn/gigaset/common.c int gigaset_enterconfigmode(struct cardstate *cs) cs 107 drivers/staging/isdn/gigaset/common.c cs->control_state = TIOCM_RTS; cs 109 drivers/staging/isdn/gigaset/common.c r = setflags(cs, TIOCM_DTR, 200); cs 112 drivers/staging/isdn/gigaset/common.c r = setflags(cs, 0, 200); cs 116 drivers/staging/isdn/gigaset/common.c r = setflags(cs, TIOCM_RTS, 100); cs 119 drivers/staging/isdn/gigaset/common.c r = setflags(cs, 0, 100); cs 123 drivers/staging/isdn/gigaset/common.c r = setflags(cs, TIOCM_RTS | TIOCM_DTR, 800); cs 130 drivers/staging/isdn/gigaset/common.c dev_err(cs->dev, "error %d on setuartbits\n", -r); cs 131 drivers/staging/isdn/gigaset/common.c cs->control_state = TIOCM_RTS | TIOCM_DTR; cs 132 drivers/staging/isdn/gigaset/common.c cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS | TIOCM_DTR); cs 148 drivers/staging/isdn/gigaset/common.c gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL, cs 155 drivers/staging/isdn/gigaset/common.c struct cardstate *cs = from_timer(cs, t, timer); cs 161 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 163 drivers/staging/isdn/gigaset/common.c for (channel = 0; channel < cs->channels; ++channel) cs 164 drivers/staging/isdn/gigaset/common.c if (test_timeout(&cs->bcs[channel].at_state)) cs 167 drivers/staging/isdn/gigaset/common.c if (test_timeout(&cs->at_state)) cs 170 drivers/staging/isdn/gigaset/common.c list_for_each_entry(at_state, &cs->temp_at_states, list) cs 174 drivers/staging/isdn/gigaset/common.c if (cs->running) { cs 175 
drivers/staging/isdn/gigaset/common.c mod_timer(&cs->timer, jiffies + msecs_to_jiffies(GIG_TICK)); cs 178 drivers/staging/isdn/gigaset/common.c tasklet_schedule(&cs->event_tasklet); cs 182 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 189 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&bcs->cs->lock, flags); cs 190 drivers/staging/isdn/gigaset/common.c if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) { cs 193 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&bcs->cs->lock, flags); cs 199 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&bcs->cs->lock, flags); cs 203 drivers/staging/isdn/gigaset/common.c struct bc_state *gigaset_get_free_channel(struct cardstate *cs) cs 208 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 209 drivers/staging/isdn/gigaset/common.c if (!try_module_get(cs->driver->owner)) { cs 212 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 215 drivers/staging/isdn/gigaset/common.c for (i = 0; i < cs->channels; ++i) cs 216 drivers/staging/isdn/gigaset/common.c if (!cs->bcs[i].use_count) { cs 217 drivers/staging/isdn/gigaset/common.c ++cs->bcs[i].use_count; cs 218 drivers/staging/isdn/gigaset/common.c cs->bcs[i].busy = 1; cs 219 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 221 drivers/staging/isdn/gigaset/common.c return cs->bcs + i; cs 223 drivers/staging/isdn/gigaset/common.c module_put(cs->driver->owner); cs 224 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 233 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&bcs->cs->lock, flags); cs 237 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&bcs->cs->lock, flags); cs 242 drivers/staging/isdn/gigaset/common.c module_put(bcs->cs->driver->owner); cs 244 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&bcs->cs->lock, flags); cs 247 drivers/staging/isdn/gigaset/common.c int gigaset_get_channels(struct cardstate *cs) cs 252 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 253 drivers/staging/isdn/gigaset/common.c for (i = 0; i < cs->channels; ++i) cs 254 drivers/staging/isdn/gigaset/common.c if (cs->bcs[i].use_count) { cs 255 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 260 drivers/staging/isdn/gigaset/common.c for (i = 0; i < cs->channels; ++i) cs 261 drivers/staging/isdn/gigaset/common.c ++cs->bcs[i].use_count; cs 262 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 269 drivers/staging/isdn/gigaset/common.c void gigaset_free_channels(struct cardstate *cs) cs 275 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 276 drivers/staging/isdn/gigaset/common.c for (i = 0; i < cs->channels; ++i) cs 277 drivers/staging/isdn/gigaset/common.c --cs->bcs[i].use_count; cs 278 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 281 drivers/staging/isdn/gigaset/common.c void gigaset_block_channels(struct cardstate *cs) cs 287 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 288 drivers/staging/isdn/gigaset/common.c for (i = 0; i < cs->channels; ++i) cs 289 drivers/staging/isdn/gigaset/common.c ++cs->bcs[i].use_count; cs 290 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 293 drivers/staging/isdn/gigaset/common.c static void clear_events(struct cardstate *cs) cs 299 
drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->ev_lock, flags); cs 301 drivers/staging/isdn/gigaset/common.c head = cs->ev_head; cs 302 drivers/staging/isdn/gigaset/common.c tail = cs->ev_tail; cs 305 drivers/staging/isdn/gigaset/common.c ev = cs->events + head; cs 310 drivers/staging/isdn/gigaset/common.c cs->ev_head = tail; cs 312 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->ev_lock, flags); cs 329 drivers/staging/isdn/gigaset/common.c struct event_t *gigaset_add_event(struct cardstate *cs, cs 339 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->ev_lock, flags); cs 341 drivers/staging/isdn/gigaset/common.c tail = cs->ev_tail; cs 343 drivers/staging/isdn/gigaset/common.c if (unlikely(next == cs->ev_head)) cs 344 drivers/staging/isdn/gigaset/common.c dev_err(cs->dev, "event queue full\n"); cs 346 drivers/staging/isdn/gigaset/common.c event = cs->events + tail; cs 353 drivers/staging/isdn/gigaset/common.c cs->ev_tail = next; cs 356 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->ev_lock, flags); cs 372 drivers/staging/isdn/gigaset/common.c static void dealloc_temp_at_states(struct cardstate *cs) cs 376 drivers/staging/isdn/gigaset/common.c list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) { cs 388 drivers/staging/isdn/gigaset/common.c bcs->cs->ops->freebcshw(bcs); cs 406 drivers/staging/isdn/gigaset/common.c struct cardstate *cs; cs 413 drivers/staging/isdn/gigaset/common.c cs = drv->cs + i; cs 414 drivers/staging/isdn/gigaset/common.c if (!(cs->flags & VALID_MINOR)) { cs 415 drivers/staging/isdn/gigaset/common.c cs->flags = VALID_MINOR; cs 416 drivers/staging/isdn/gigaset/common.c ret = cs; cs 425 drivers/staging/isdn/gigaset/common.c static void free_cs(struct cardstate *cs) cs 427 drivers/staging/isdn/gigaset/common.c cs->flags = 0; cs 430 drivers/staging/isdn/gigaset/common.c static void make_valid(struct cardstate *cs, unsigned mask) cs 433 drivers/staging/isdn/gigaset/common.c struct gigaset_driver *drv = cs->driver; cs 435 drivers/staging/isdn/gigaset/common.c cs->flags |= mask; cs 439 drivers/staging/isdn/gigaset/common.c static void make_invalid(struct cardstate *cs, unsigned mask) cs 442 drivers/staging/isdn/gigaset/common.c struct gigaset_driver *drv = cs->driver; cs 444 drivers/staging/isdn/gigaset/common.c cs->flags &= ~mask; cs 457 drivers/staging/isdn/gigaset/common.c void gigaset_freecs(struct cardstate *cs) cs 462 drivers/staging/isdn/gigaset/common.c if (!cs) cs 465 drivers/staging/isdn/gigaset/common.c mutex_lock(&cs->mutex); cs 467 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 468 drivers/staging/isdn/gigaset/common.c cs->running = 0; cs 469 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are cs 472 drivers/staging/isdn/gigaset/common.c tasklet_kill(&cs->event_tasklet); cs 473 drivers/staging/isdn/gigaset/common.c del_timer_sync(&cs->timer); cs 475 drivers/staging/isdn/gigaset/common.c switch (cs->cs_init) { cs 478 drivers/staging/isdn/gigaset/common.c for (i = 0; i < cs->channels; ++i) { cs 480 drivers/staging/isdn/gigaset/common.c gigaset_freebcs(cs->bcs + i); cs 484 drivers/staging/isdn/gigaset/common.c gigaset_free_dev_sysfs(cs); cs 486 drivers/staging/isdn/gigaset/common.c gigaset_if_free(cs); cs 489 drivers/staging/isdn/gigaset/common.c cs->ops->freecshw(cs); cs 494 drivers/staging/isdn/gigaset/common.c make_invalid(cs, VALID_ID); cs 495 drivers/staging/isdn/gigaset/common.c 
gigaset_isdn_unregdev(cs); cs 500 drivers/staging/isdn/gigaset/common.c clear_at_state(&cs->at_state); cs 501 drivers/staging/isdn/gigaset/common.c dealloc_temp_at_states(cs); cs 502 drivers/staging/isdn/gigaset/common.c clear_events(cs); cs 503 drivers/staging/isdn/gigaset/common.c tty_port_destroy(&cs->port); cs 508 drivers/staging/isdn/gigaset/common.c kfree(cs->inbuf); cs 509 drivers/staging/isdn/gigaset/common.c kfree(cs->bcs); cs 512 drivers/staging/isdn/gigaset/common.c mutex_unlock(&cs->mutex); cs 513 drivers/staging/isdn/gigaset/common.c free_cs(cs); cs 518 drivers/staging/isdn/gigaset/common.c struct cardstate *cs, int cid) cs 536 drivers/staging/isdn/gigaset/common.c at_state->cs = cs; cs 540 drivers/staging/isdn/gigaset/common.c at_state->replystruct = cs->tabnocid; cs 542 drivers/staging/isdn/gigaset/common.c at_state->replystruct = cs->tabcid; cs 546 drivers/staging/isdn/gigaset/common.c static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs) cs 551 drivers/staging/isdn/gigaset/common.c inbuf->cs = cs; cs 586 drivers/staging/isdn/gigaset/common.c dev_err(inbuf->cs->dev, cs 605 drivers/staging/isdn/gigaset/common.c static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs, cs 619 drivers/staging/isdn/gigaset/common.c gigaset_at_init(&bcs->at_state, bcs, cs, -1); cs 630 drivers/staging/isdn/gigaset/common.c bcs->cs = cs; cs 635 drivers/staging/isdn/gigaset/common.c bcs->ignore = cs->ignoreframes; cs 645 drivers/staging/isdn/gigaset/common.c return cs->ops->initbcshw(bcs); cs 670 drivers/staging/isdn/gigaset/common.c struct cardstate *cs; cs 675 drivers/staging/isdn/gigaset/common.c cs = alloc_cs(drv); cs 676 drivers/staging/isdn/gigaset/common.c if (!cs) { cs 681 drivers/staging/isdn/gigaset/common.c cs->cs_init = 0; cs 682 drivers/staging/isdn/gigaset/common.c cs->channels = channels; cs 683 drivers/staging/isdn/gigaset/common.c cs->onechannel = onechannel; cs 684 drivers/staging/isdn/gigaset/common.c cs->ignoreframes = ignoreframes; cs 685 drivers/staging/isdn/gigaset/common.c INIT_LIST_HEAD(&cs->temp_at_states); cs 686 drivers/staging/isdn/gigaset/common.c cs->running = 0; cs 687 drivers/staging/isdn/gigaset/common.c timer_setup(&cs->timer, timer_tick, 0); cs 688 drivers/staging/isdn/gigaset/common.c spin_lock_init(&cs->ev_lock); cs 689 drivers/staging/isdn/gigaset/common.c cs->ev_tail = 0; cs 690 drivers/staging/isdn/gigaset/common.c cs->ev_head = 0; cs 692 drivers/staging/isdn/gigaset/common.c tasklet_init(&cs->event_tasklet, gigaset_handle_event, cs 693 drivers/staging/isdn/gigaset/common.c (unsigned long) cs); cs 694 drivers/staging/isdn/gigaset/common.c tty_port_init(&cs->port); cs 695 drivers/staging/isdn/gigaset/common.c cs->commands_pending = 0; cs 696 drivers/staging/isdn/gigaset/common.c cs->cur_at_seq = 0; cs 697 drivers/staging/isdn/gigaset/common.c cs->gotfwver = -1; cs 698 drivers/staging/isdn/gigaset/common.c cs->dev = NULL; cs 699 drivers/staging/isdn/gigaset/common.c cs->tty_dev = NULL; cs 700 drivers/staging/isdn/gigaset/common.c cs->cidmode = cidmode != 0; cs 701 drivers/staging/isdn/gigaset/common.c cs->tabnocid = gigaset_tab_nocid; cs 702 drivers/staging/isdn/gigaset/common.c cs->tabcid = gigaset_tab_cid; cs 704 drivers/staging/isdn/gigaset/common.c init_waitqueue_head(&cs->waitqueue); cs 705 drivers/staging/isdn/gigaset/common.c cs->waiting = 0; cs 707 drivers/staging/isdn/gigaset/common.c cs->mode = M_UNKNOWN; cs 708 drivers/staging/isdn/gigaset/common.c cs->mstate = MS_UNINITIALIZED; cs 710 
drivers/staging/isdn/gigaset/common.c cs->bcs = kmalloc_array(channels, sizeof(struct bc_state), GFP_KERNEL); cs 711 drivers/staging/isdn/gigaset/common.c cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL); cs 712 drivers/staging/isdn/gigaset/common.c if (!cs->bcs || !cs->inbuf) { cs 716 drivers/staging/isdn/gigaset/common.c ++cs->cs_init; cs 719 drivers/staging/isdn/gigaset/common.c spin_lock_init(&cs->lock); cs 720 drivers/staging/isdn/gigaset/common.c gigaset_at_init(&cs->at_state, NULL, cs, 0); cs 721 drivers/staging/isdn/gigaset/common.c cs->dle = 0; cs 722 drivers/staging/isdn/gigaset/common.c cs->cbytes = 0; cs 725 drivers/staging/isdn/gigaset/common.c gigaset_inbuf_init(cs->inbuf, cs); cs 727 drivers/staging/isdn/gigaset/common.c cs->connected = 0; cs 728 drivers/staging/isdn/gigaset/common.c cs->isdn_up = 0; cs 731 drivers/staging/isdn/gigaset/common.c cs->cmdbuf = cs->lastcmdbuf = NULL; cs 732 drivers/staging/isdn/gigaset/common.c spin_lock_init(&cs->cmdlock); cs 733 drivers/staging/isdn/gigaset/common.c cs->curlen = 0; cs 734 drivers/staging/isdn/gigaset/common.c cs->cmdbytes = 0; cs 737 drivers/staging/isdn/gigaset/common.c if (gigaset_isdn_regdev(cs, modulename) < 0) { cs 742 drivers/staging/isdn/gigaset/common.c make_valid(cs, VALID_ID); cs 743 drivers/staging/isdn/gigaset/common.c ++cs->cs_init; cs 745 drivers/staging/isdn/gigaset/common.c if (cs->ops->initcshw(cs) < 0) cs 748 drivers/staging/isdn/gigaset/common.c ++cs->cs_init; cs 751 drivers/staging/isdn/gigaset/common.c gigaset_if_init(cs); cs 754 drivers/staging/isdn/gigaset/common.c gigaset_init_dev_sysfs(cs); cs 759 drivers/staging/isdn/gigaset/common.c if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) { cs 765 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 766 drivers/staging/isdn/gigaset/common.c cs->running = 1; cs 767 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 768 drivers/staging/isdn/gigaset/common.c cs->timer.expires = jiffies + msecs_to_jiffies(GIG_TICK); cs 769 drivers/staging/isdn/gigaset/common.c add_timer(&cs->timer); cs 772 drivers/staging/isdn/gigaset/common.c return cs; cs 776 drivers/staging/isdn/gigaset/common.c gigaset_freecs(cs); cs 785 drivers/staging/isdn/gigaset/common.c struct cardstate *cs = bcs->cs; cs 791 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 797 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 808 drivers/staging/isdn/gigaset/common.c bcs->ignore = cs->ignoreframes; cs 812 drivers/staging/isdn/gigaset/common.c cs->ops->reinitbcshw(bcs); cs 815 drivers/staging/isdn/gigaset/common.c static void cleanup_cs(struct cardstate *cs) cs 821 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 823 drivers/staging/isdn/gigaset/common.c cs->mode = M_UNKNOWN; cs 824 drivers/staging/isdn/gigaset/common.c cs->mstate = MS_UNINITIALIZED; cs 826 drivers/staging/isdn/gigaset/common.c clear_at_state(&cs->at_state); cs 827 drivers/staging/isdn/gigaset/common.c dealloc_temp_at_states(cs); cs 828 drivers/staging/isdn/gigaset/common.c gigaset_at_init(&cs->at_state, NULL, cs, 0); cs 830 drivers/staging/isdn/gigaset/common.c cs->inbuf->inputstate = INS_command; cs 831 drivers/staging/isdn/gigaset/common.c cs->inbuf->head = 0; cs 832 drivers/staging/isdn/gigaset/common.c cs->inbuf->tail = 0; cs 834 drivers/staging/isdn/gigaset/common.c cb = cs->cmdbuf; cs 840 drivers/staging/isdn/gigaset/common.c cs->cmdbuf = cs->lastcmdbuf = NULL; cs 841 
drivers/staging/isdn/gigaset/common.c cs->curlen = 0; cs 842 drivers/staging/isdn/gigaset/common.c cs->cmdbytes = 0; cs 843 drivers/staging/isdn/gigaset/common.c cs->gotfwver = -1; cs 844 drivers/staging/isdn/gigaset/common.c cs->dle = 0; cs 845 drivers/staging/isdn/gigaset/common.c cs->cur_at_seq = 0; cs 846 drivers/staging/isdn/gigaset/common.c cs->commands_pending = 0; cs 847 drivers/staging/isdn/gigaset/common.c cs->cbytes = 0; cs 849 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 851 drivers/staging/isdn/gigaset/common.c for (i = 0; i < cs->channels; ++i) { cs 852 drivers/staging/isdn/gigaset/common.c gigaset_freebcs(cs->bcs + i); cs 853 drivers/staging/isdn/gigaset/common.c if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) cs 857 drivers/staging/isdn/gigaset/common.c if (cs->waiting) { cs 858 drivers/staging/isdn/gigaset/common.c cs->cmd_result = -ENODEV; cs 859 drivers/staging/isdn/gigaset/common.c cs->waiting = 0; cs 860 drivers/staging/isdn/gigaset/common.c wake_up_interruptible(&cs->waitqueue); cs 876 drivers/staging/isdn/gigaset/common.c int gigaset_start(struct cardstate *cs) cs 880 drivers/staging/isdn/gigaset/common.c if (mutex_lock_interruptible(&cs->mutex)) cs 883 drivers/staging/isdn/gigaset/common.c spin_lock_irqsave(&cs->lock, flags); cs 884 drivers/staging/isdn/gigaset/common.c cs->connected = 1; cs 885 drivers/staging/isdn/gigaset/common.c spin_unlock_irqrestore(&cs->lock, flags); cs 887 drivers/staging/isdn/gigaset/common.c if (cs->mstate != MS_LOCKED) { cs 888 drivers/staging/isdn/gigaset/common.c cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS); cs 889 drivers/staging/isdn/gigaset/common.c cs->ops->baud_rate(cs, B115200); cs 890 drivers/staging/isdn/gigaset/common.c cs->ops->set_line_ctrl(cs, CS8); cs 891 drivers/staging/isdn/gigaset/common.c cs->control_state = TIOCM_DTR | TIOCM_RTS; cs 894 drivers/staging/isdn/gigaset/common.c cs->waiting = 1; cs 896 drivers/staging/isdn/gigaset/common.c if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) { cs 897 drivers/staging/isdn/gigaset/common.c cs->waiting = 0; cs 900 drivers/staging/isdn/gigaset/common.c gigaset_schedule_event(cs); cs 902 drivers/staging/isdn/gigaset/common.c wait_event(cs->waitqueue, !cs->waiting); cs 904 drivers/staging/isdn/gigaset/common.c mutex_unlock(&cs->mutex); cs 908 drivers/staging/isdn/gigaset/common.c mutex_unlock(&cs->mutex); cs 923 drivers/staging/isdn/gigaset/common.c int gigaset_shutdown(struct cardstate *cs) cs 925 drivers/staging/isdn/gigaset/common.c mutex_lock(&cs->mutex); cs 927 drivers/staging/isdn/gigaset/common.c if (!(cs->flags & VALID_MINOR)) { cs 928 drivers/staging/isdn/gigaset/common.c mutex_unlock(&cs->mutex); cs 932 drivers/staging/isdn/gigaset/common.c cs->waiting = 1; cs 934 drivers/staging/isdn/gigaset/common.c if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) cs 936 drivers/staging/isdn/gigaset/common.c gigaset_schedule_event(cs); cs 938 drivers/staging/isdn/gigaset/common.c wait_event(cs->waitqueue, !cs->waiting); cs 940 drivers/staging/isdn/gigaset/common.c cleanup_cs(cs); cs 943 drivers/staging/isdn/gigaset/common.c mutex_unlock(&cs->mutex); cs 955 drivers/staging/isdn/gigaset/common.c void gigaset_stop(struct cardstate *cs) cs 957 drivers/staging/isdn/gigaset/common.c mutex_lock(&cs->mutex); cs 959 drivers/staging/isdn/gigaset/common.c cs->waiting = 1; cs 961 drivers/staging/isdn/gigaset/common.c if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) cs 963 
drivers/staging/isdn/gigaset/common.c gigaset_schedule_event(cs); cs 965 drivers/staging/isdn/gigaset/common.c wait_event(cs->waitqueue, !cs->waiting); cs 967 drivers/staging/isdn/gigaset/common.c cleanup_cs(cs); cs 970 drivers/staging/isdn/gigaset/common.c mutex_unlock(&cs->mutex); cs 981 drivers/staging/isdn/gigaset/common.c struct cardstate *cs; cs 989 drivers/staging/isdn/gigaset/common.c cs = drv->cs + i; cs 990 drivers/staging/isdn/gigaset/common.c if ((cs->flags & VALID_ID) && cs->myid == id) { cs 991 drivers/staging/isdn/gigaset/common.c ret = cs; cs 1016 drivers/staging/isdn/gigaset/common.c if (drv->cs[index].flags & VALID_MINOR) cs 1017 drivers/staging/isdn/gigaset/common.c ret = drv->cs + index; cs 1049 drivers/staging/isdn/gigaset/common.c kfree(drv->cs); cs 1089 drivers/staging/isdn/gigaset/common.c drv->cs = kmalloc_array(minors, sizeof(*drv->cs), GFP_KERNEL); cs 1090 drivers/staging/isdn/gigaset/common.c if (!drv->cs) cs 1094 drivers/staging/isdn/gigaset/common.c drv->cs[i].flags = 0; cs 1095 drivers/staging/isdn/gigaset/common.c drv->cs[i].driver = drv; cs 1096 drivers/staging/isdn/gigaset/common.c drv->cs[i].ops = drv->ops; cs 1097 drivers/staging/isdn/gigaset/common.c drv->cs[i].minor_index = i; cs 1098 drivers/staging/isdn/gigaset/common.c mutex_init(&drv->cs[i].mutex); cs 50 drivers/staging/isdn/gigaset/dummyll.c void gigaset_isdn_start(struct cardstate *cs) cs 54 drivers/staging/isdn/gigaset/dummyll.c void gigaset_isdn_stop(struct cardstate *cs) cs 58 drivers/staging/isdn/gigaset/dummyll.c int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) cs 63 drivers/staging/isdn/gigaset/dummyll.c void gigaset_isdn_unregdev(struct cardstate *cs) cs 404 drivers/staging/isdn/gigaset/ev-layer.c static void add_cid_event(struct cardstate *cs, int cid, int type, cs 413 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->ev_lock, flags); cs 415 drivers/staging/isdn/gigaset/ev-layer.c tail = cs->ev_tail; cs 417 drivers/staging/isdn/gigaset/ev-layer.c if (unlikely(next == cs->ev_head)) { cs 418 drivers/staging/isdn/gigaset/ev-layer.c dev_err(cs->dev, "event queue full\n"); cs 421 drivers/staging/isdn/gigaset/ev-layer.c event = cs->events + tail; cs 428 drivers/staging/isdn/gigaset/ev-layer.c cs->ev_tail = next; cs 431 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->ev_lock, flags); cs 442 drivers/staging/isdn/gigaset/ev-layer.c void gigaset_handle_modem_response(struct cardstate *cs) cs 450 drivers/staging/isdn/gigaset/ev-layer.c if (!cs->cbytes) { cs 452 drivers/staging/isdn/gigaset/ev-layer.c gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]); cs 455 drivers/staging/isdn/gigaset/ev-layer.c cs->respdata[cs->cbytes] = 0; cs 457 drivers/staging/isdn/gigaset/ev-layer.c if (cs->at_state.getstring) { cs 459 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.getstring = 0; cs 460 drivers/staging/isdn/gigaset/ev-layer.c ptr = kstrdup(cs->respdata, GFP_ATOMIC); cs 462 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, 0, RSP_STRING, ptr, 0); cs 468 drivers/staging/isdn/gigaset/ev-layer.c eoc = skip_prefix(cs->respdata, rt->response); cs 473 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, 0, RSP_NONE, NULL, 0); cs 475 drivers/staging/isdn/gigaset/ev-layer.c cs->respdata); cs 480 drivers/staging/isdn/gigaset/ev-layer.c psep = strrchr(cs->respdata, ';'); cs 491 drivers/staging/isdn/gigaset/ev-layer.c gig_dbg(DEBUG_EVENT, "CMD received: %s", cs->respdata); cs 501 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, cid, 
rt->resp_code, NULL, 0);
cs 511 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, 0, rt->resp_code, NULL, cid);
cs 525 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev,
cs 544 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, cid, rt->resp_code, ptr, 0);
cs 552 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE);
cs 565 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, cid, rt->resp_code, NULL, zr->code);
cs 577 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, cid, rt->resp_code, ptr, 0);
cs 596 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
cs 610 drivers/staging/isdn/gigaset/ev-layer.c cs->dle = parameter;
cs 612 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
cs 617 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev, "bad parameter in response '%s'\n",
cs 618 drivers/staging/isdn/gigaset/ev-layer.c cs->respdata);
cs 619 drivers/staging/isdn/gigaset/ev-layer.c add_cid_event(cs, cid, rt->resp_code, NULL, -1);
cs 623 drivers/staging/isdn/gigaset/ev-layer.c dev_err(cs->dev, "%s: internal error on '%s'\n",
cs 624 drivers/staging/isdn/gigaset/ev-layer.c __func__, cs->respdata);
cs 634 drivers/staging/isdn/gigaset/ev-layer.c struct cardstate *cs)
cs 638 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 642 drivers/staging/isdn/gigaset/ev-layer.c if (!cs->cidmode) {
cs 643 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands |= PC_UMMODE;
cs 645 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 655 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 663 drivers/staging/isdn/gigaset/ev-layer.c struct cardstate *cs, struct bc_state *bcs)
cs 667 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 671 drivers/staging/isdn/gigaset/ev-layer.c if (!cs->cidmode) {
cs 672 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands |= PC_UMMODE;
cs 674 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 676 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 679 drivers/staging/isdn/gigaset/ev-layer.c cs->ops->close_bchannel(bcs);
cs 694 drivers/staging/isdn/gigaset/ev-layer.c static inline struct at_state_t *get_free_channel(struct cardstate *cs,
cs 705 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i)
cs 706 drivers/staging/isdn/gigaset/ev-layer.c if (gigaset_get_channel(cs->bcs + i) >= 0) {
cs 707 drivers/staging/isdn/gigaset/ev-layer.c ret = &cs->bcs[i].at_state;
cs 712 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 715 drivers/staging/isdn/gigaset/ev-layer.c gigaset_at_init(ret, NULL, cs, cid);
cs 716 drivers/staging/isdn/gigaset/ev-layer.c list_add(&ret->list, &cs->temp_at_states);
cs 718 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 722 drivers/staging/isdn/gigaset/ev-layer.c static void init_failed(struct cardstate *cs, int mode)
cs 727 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands &= ~PC_INIT;
cs 728 drivers/staging/isdn/gigaset/ev-layer.c cs->mode = mode;
cs 729 drivers/staging/isdn/gigaset/ev-layer.c cs->mstate = MS_UNINITIALIZED;
cs 730 drivers/staging/isdn/gigaset/ev-layer.c gigaset_free_channels(cs);
cs 731 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i) {
cs 732 drivers/staging/isdn/gigaset/ev-layer.c at_state = &cs->bcs[i].at_state;
cs 736 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 741 drivers/staging/isdn/gigaset/ev-layer.c static void schedule_init(struct cardstate *cs, int state)
cs 743 drivers/staging/isdn/gigaset/ev-layer.c if (cs->at_state.pending_commands & PC_INIT) {
cs 747 drivers/staging/isdn/gigaset/ev-layer.c cs->mstate = state;
cs 748 drivers/staging/isdn/gigaset/ev-layer.c cs->mode = M_UNKNOWN;
cs 749 drivers/staging/isdn/gigaset/ev-layer.c gigaset_block_channels(cs);
cs 750 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands |= PC_INIT;
cs 752 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 758 drivers/staging/isdn/gigaset/ev-layer.c static void send_command(struct cardstate *cs, const char *cmd,
cs 768 drivers/staging/isdn/gigaset/ev-layer.c dev_err(cs->dev, "%s: out of memory\n", __func__);
cs 773 drivers/staging/isdn/gigaset/ev-layer.c cs->dle ? "\020(AT%d%s\020)" : "AT%d%s",
cs 777 drivers/staging/isdn/gigaset/ev-layer.c cs->dle ? "\020(AT%s\020)" : "AT%s",
cs 782 drivers/staging/isdn/gigaset/ev-layer.c cs->ops->write_cmd(cs, cb);
cs 785 drivers/staging/isdn/gigaset/ev-layer.c static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid)
cs 792 drivers/staging/isdn/gigaset/ev-layer.c return &cs->at_state;
cs 794 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i)
cs 795 drivers/staging/isdn/gigaset/ev-layer.c if (cid == cs->bcs[i].at_state.cid)
cs 796 drivers/staging/isdn/gigaset/ev-layer.c return &cs->bcs[i].at_state;
cs 798 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 800 drivers/staging/isdn/gigaset/ev-layer.c list_for_each_entry(at_state, &cs->temp_at_states, list)
cs 802 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 806 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 831 drivers/staging/isdn/gigaset/ev-layer.c dev_notice(bcs->cs->dev, "%s: B channel already up\n",
cs 844 drivers/staging/isdn/gigaset/ev-layer.c struct cardstate *cs = at_state->cs;
cs 851 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 853 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 856 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 865 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 875 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 881 drivers/staging/isdn/gigaset/ev-layer.c struct cardstate *cs = at_state->cs;
cs 893 drivers/staging/isdn/gigaset/ev-layer.c dev_err(at_state->cs->dev, "out of memory\n");
cs 897 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 906 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 909 drivers/staging/isdn/gigaset/ev-layer.c static void do_start(struct cardstate *cs)
cs 911 drivers/staging/isdn/gigaset/ev-layer.c gigaset_free_channels(cs);
cs 913 drivers/staging/isdn/gigaset/ev-layer.c if (cs->mstate != MS_LOCKED)
cs 914 drivers/staging/isdn/gigaset/ev-layer.c schedule_init(cs, MS_INIT);
cs 916 drivers/staging/isdn/gigaset/ev-layer.c cs->isdn_up = 1;
cs 917 drivers/staging/isdn/gigaset/ev-layer.c gigaset_isdn_start(cs);
cs 919 drivers/staging/isdn/gigaset/ev-layer.c cs->waiting = 0;
cs 920 drivers/staging/isdn/gigaset/ev-layer.c wake_up(&cs->waitqueue);
cs 923 drivers/staging/isdn/gigaset/ev-layer.c static void finish_shutdown(struct cardstate *cs)
cs 925 drivers/staging/isdn/gigaset/ev-layer.c if (cs->mstate != MS_LOCKED) {
cs 926 drivers/staging/isdn/gigaset/ev-layer.c cs->mstate = MS_UNINITIALIZED;
cs 927 drivers/staging/isdn/gigaset/ev-layer.c cs->mode = M_UNKNOWN;
cs 931 drivers/staging/isdn/gigaset/ev-layer.c if (cs->isdn_up) {
cs 932 drivers/staging/isdn/gigaset/ev-layer.c cs->isdn_up = 0;
cs 933 drivers/staging/isdn/gigaset/ev-layer.c gigaset_isdn_stop(cs);
cs 938 drivers/staging/isdn/gigaset/ev-layer.c cs->cmd_result = -ENODEV;
cs 939 drivers/staging/isdn/gigaset/ev-layer.c cs->waiting = 0;
cs 940 drivers/staging/isdn/gigaset/ev-layer.c wake_up(&cs->waitqueue);
cs 943 drivers/staging/isdn/gigaset/ev-layer.c static void do_shutdown(struct cardstate *cs)
cs 945 drivers/staging/isdn/gigaset/ev-layer.c gigaset_block_channels(cs);
cs 947 drivers/staging/isdn/gigaset/ev-layer.c if (cs->mstate == MS_READY) {
cs 948 drivers/staging/isdn/gigaset/ev-layer.c cs->mstate = MS_SHUTDOWN;
cs 949 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands |= PC_SHUTDOWN;
cs 951 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 953 drivers/staging/isdn/gigaset/ev-layer.c finish_shutdown(cs);
cs 956 drivers/staging/isdn/gigaset/ev-layer.c static void do_stop(struct cardstate *cs)
cs 960 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 961 drivers/staging/isdn/gigaset/ev-layer.c cs->connected = 0;
cs 962 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 964 drivers/staging/isdn/gigaset/ev-layer.c do_shutdown(cs);
cs 975 drivers/staging/isdn/gigaset/ev-layer.c static int reinit_and_retry(struct cardstate *cs, int channel)
cs 979 drivers/staging/isdn/gigaset/ev-layer.c if (--cs->retry_count <= 0)
cs 982 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i)
cs 983 drivers/staging/isdn/gigaset/ev-layer.c if (cs->bcs[i].at_state.cid > 0)
cs 987 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev,
cs 990 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev,
cs 992 drivers/staging/isdn/gigaset/ev-layer.c cs->bcs[channel].at_state.pending_commands |= PC_CID;
cs 994 drivers/staging/isdn/gigaset/ev-layer.c schedule_init(cs, MS_INIT);
cs 998 drivers/staging/isdn/gigaset/ev-layer.c static int at_state_invalid(struct cardstate *cs,
cs 1006 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 1008 drivers/staging/isdn/gigaset/ev-layer.c if (test_ptr == &cs->at_state)
cs 1011 drivers/staging/isdn/gigaset/ev-layer.c list_for_each_entry(at_state, &cs->temp_at_states, list)
cs 1015 drivers/staging/isdn/gigaset/ev-layer.c for (channel = 0; channel < cs->channels; ++channel)
cs 1016 drivers/staging/isdn/gigaset/ev-layer.c if (&cs->bcs[channel].at_state == test_ptr)
cs 1021 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 1025 drivers/staging/isdn/gigaset/ev-layer.c static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
cs 1035 drivers/staging/isdn/gigaset/ev-layer.c dev_err(cs->dev, "internal error: disposition=%d\n", retval);
cs 1044 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1049 drivers/staging/isdn/gigaset/ev-layer.c static int do_lock(struct cardstate *cs)
cs 1054 drivers/staging/isdn/gigaset/ev-layer.c switch (cs->mstate) {
cs 1057 drivers/staging/isdn/gigaset/ev-layer.c if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
cs 1058 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands)
cs 1061 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i)
cs 1062 drivers/staging/isdn/gigaset/ev-layer.c if (cs->bcs[i].at_state.pending_commands)
cs 1065 drivers/staging/isdn/gigaset/ev-layer.c if (gigaset_get_channels(cs) < 0)
cs 1075 drivers/staging/isdn/gigaset/ev-layer.c mode = cs->mode;
cs 1076 drivers/staging/isdn/gigaset/ev-layer.c cs->mstate = MS_LOCKED;
cs 1077 drivers/staging/isdn/gigaset/ev-layer.c cs->mode = M_UNKNOWN;
cs 1082 drivers/staging/isdn/gigaset/ev-layer.c static int do_unlock(struct cardstate *cs)
cs 1084 drivers/staging/isdn/gigaset/ev-layer.c if (cs->mstate != MS_LOCKED)
cs 1087 drivers/staging/isdn/gigaset/ev-layer.c cs->mstate = MS_UNINITIALIZED;
cs 1088 drivers/staging/isdn/gigaset/ev-layer.c cs->mode = M_UNKNOWN;
cs 1089 drivers/staging/isdn/gigaset/ev-layer.c gigaset_free_channels(cs);
cs 1090 drivers/staging/isdn/gigaset/ev-layer.c if (cs->connected)
cs 1091 drivers/staging/isdn/gigaset/ev-layer.c schedule_init(cs, MS_INIT);
cs 1096 drivers/staging/isdn/gigaset/ev-layer.c static void do_action(int action, struct cardstate *cs,
cs 1119 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands &= ~PC_INIT;
cs 1120 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1121 drivers/staging/isdn/gigaset/ev-layer.c cs->mode = M_UNIMODEM;
cs 1122 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 1123 drivers/staging/isdn/gigaset/ev-layer.c if (!cs->cidmode) {
cs 1124 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 1125 drivers/staging/isdn/gigaset/ev-layer.c gigaset_free_channels(cs);
cs 1126 drivers/staging/isdn/gigaset/ev-layer.c cs->mstate = MS_READY;
cs 1129 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 1130 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands |= PC_CIDMODE;
cs 1132 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1135 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev, "Could not initialize the device.\n");
cs 1136 drivers/staging/isdn/gigaset/ev-layer.c cs->dle = 0;
cs 1137 drivers/staging/isdn/gigaset/ev-layer.c init_failed(cs, M_UNKNOWN);
cs 1138 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1141 drivers/staging/isdn/gigaset/ev-layer.c init_failed(cs, M_CONFIG);
cs 1142 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1145 drivers/staging/isdn/gigaset/ev-layer.c cs->dle = 1;
cs 1147 drivers/staging/isdn/gigaset/ev-layer.c cs->inbuf[0].inputstate &=
cs 1151 drivers/staging/isdn/gigaset/ev-layer.c cs->dle = 0;
cs 1152 drivers/staging/isdn/gigaset/ev-layer.c cs->inbuf[0].inputstate =
cs 1153 drivers/staging/isdn/gigaset/ev-layer.c (cs->inbuf[0].inputstate & ~INS_DLE_command)
cs 1157 drivers/staging/isdn/gigaset/ev-layer.c if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
cs 1158 drivers/staging/isdn/gigaset/ev-layer.c gigaset_free_channels(cs);
cs 1159 drivers/staging/isdn/gigaset/ev-layer.c cs->mstate = MS_READY;
cs 1161 drivers/staging/isdn/gigaset/ev-layer.c cs->mode = M_CID;
cs 1162 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1165 drivers/staging/isdn/gigaset/ev-layer.c cs->mode = M_UNIMODEM;
cs 1166 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1169 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1170 drivers/staging/isdn/gigaset/ev-layer.c if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
cs 1171 drivers/staging/isdn/gigaset/ev-layer.c init_failed(cs, M_UNKNOWN);
cs 1174 drivers/staging/isdn/gigaset/ev-layer.c if (reinit_and_retry(cs, -1) < 0)
cs 1175 drivers/staging/isdn/gigaset/ev-layer.c schedule_init(cs, MS_RECOVER);
cs 1178 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1179 drivers/staging/isdn/gigaset/ev-layer.c schedule_init(cs, MS_RECOVER);
cs 1183 drivers/staging/isdn/gigaset/ev-layer.c if (cs->connected) {
cs 1188 drivers/staging/isdn/gigaset/ev-layer.c dev_err(cs->dev, "%s: out of memory\n",
cs 1197 drivers/staging/isdn/gigaset/ev-layer.c cs->ops->write_cmd(cs, cb);
cs 1202 drivers/staging/isdn/gigaset/ev-layer.c at_state = get_free_channel(cs, ev->parameter);
cs 1204 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev,
cs 1219 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 1222 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 1225 drivers/staging/isdn/gigaset/ev-layer.c handle_icall(cs, bcs, at_state);
cs 1228 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev, "Could not shut down the device.\n");
cs 1232 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1233 drivers/staging/isdn/gigaset/ev-layer.c finish_shutdown(cs);
cs 1236 drivers/staging/isdn/gigaset/ev-layer.c if (cs->onechannel) {
cs 1238 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1243 drivers/staging/isdn/gigaset/ev-layer.c cs->ops->init_bchannel(bcs);
cs 1246 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1247 drivers/staging/isdn/gigaset/ev-layer.c bcs = cs->bcs + cs->curchannel;
cs 1251 drivers/staging/isdn/gigaset/ev-layer.c cs->ops->init_bchannel(bcs);
cs 1257 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1260 drivers/staging/isdn/gigaset/ev-layer.c disconnect_nobc(p_at_state, cs);
cs 1261 drivers/staging/isdn/gigaset/ev-layer.c } else if (cs->onechannel && cs->dle) {
cs 1266 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1268 drivers/staging/isdn/gigaset/ev-layer.c disconnect_bc(at_state, cs, bcs);
cs 1273 drivers/staging/isdn/gigaset/ev-layer.c cs->dle = 0;
cs 1276 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1277 drivers/staging/isdn/gigaset/ev-layer.c bcs2 = cs->bcs + cs->curchannel;
cs 1278 drivers/staging/isdn/gigaset/ev-layer.c disconnect_bc(&bcs2->at_state, cs, bcs2);
cs 1281 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1282 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev, "Could not hang up.\n");
cs 1285 drivers/staging/isdn/gigaset/ev-layer.c disconnect_nobc(p_at_state, cs);
cs 1286 drivers/staging/isdn/gigaset/ev-layer.c else if (cs->onechannel)
cs 1289 drivers/staging/isdn/gigaset/ev-layer.c disconnect_bc(at_state, cs, bcs);
cs 1290 drivers/staging/isdn/gigaset/ev-layer.c schedule_init(cs, MS_RECOVER);
cs 1293 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1294 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev, "Error leaving DLE mode.\n");
cs 1295 drivers/staging/isdn/gigaset/ev-layer.c cs->dle = 0;
cs 1296 drivers/staging/isdn/gigaset/ev-layer.c bcs2 = cs->bcs + cs->curchannel;
cs 1297 drivers/staging/isdn/gigaset/ev-layer.c disconnect_bc(&bcs2->at_state, cs, bcs2);
cs 1298 drivers/staging/isdn/gigaset/ev-layer.c schedule_init(cs, MS_RECOVER);
cs 1301 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1302 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev,
cs 1304 drivers/staging/isdn/gigaset/ev-layer.c channel = cs->curchannel;
cs 1305 drivers/staging/isdn/gigaset/ev-layer.c cs->bcs[channel].at_state.pending_commands |= PC_HUP;
cs 1306 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1310 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1311 drivers/staging/isdn/gigaset/ev-layer.c channel = cs->curchannel;
cs 1313 drivers/staging/isdn/gigaset/ev-layer.c cs->bcs[channel].at_state.cid = ev->parameter;
cs 1314 drivers/staging/isdn/gigaset/ev-layer.c cs->bcs[channel].at_state.pending_commands |=
cs 1316 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1321 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1322 drivers/staging/isdn/gigaset/ev-layer.c channel = cs->curchannel;
cs 1323 drivers/staging/isdn/gigaset/ev-layer.c if (reinit_and_retry(cs, channel) < 0) {
cs 1324 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev,
cs 1326 drivers/staging/isdn/gigaset/ev-layer.c bcs2 = cs->bcs + channel;
cs 1327 drivers/staging/isdn/gigaset/ev-layer.c disconnect_bc(&bcs2->at_state, cs, bcs2);
cs 1331 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1332 drivers/staging/isdn/gigaset/ev-layer.c bcs2 = cs->bcs + cs->curchannel;
cs 1333 drivers/staging/isdn/gigaset/ev-layer.c disconnect_bc(&bcs2->at_state, cs, bcs2);
cs 1338 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1343 drivers/staging/isdn/gigaset/ev-layer.c disconnect_bc(at_state, cs, bcs);
cs 1345 drivers/staging/isdn/gigaset/ev-layer.c disconnect_nobc(p_at_state, cs);
cs 1349 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1351 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1358 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1390 drivers/staging/isdn/gigaset/ev-layer.c cs->fwver[i] = val;
cs 1397 drivers/staging/isdn/gigaset/ev-layer.c cs->gotfwver = 0;
cs 1400 drivers/staging/isdn/gigaset/ev-layer.c if (cs->gotfwver == 0) {
cs 1401 drivers/staging/isdn/gigaset/ev-layer.c cs->gotfwver = 1;
cs 1404 drivers/staging/isdn/gigaset/ev-layer.c cs->fwver[0], cs->fwver[1],
cs 1405 drivers/staging/isdn/gigaset/ev-layer.c cs->fwver[2], cs->fwver[3]);
cs 1410 drivers/staging/isdn/gigaset/ev-layer.c cs->gotfwver = -1;
cs 1411 drivers/staging/isdn/gigaset/ev-layer.c dev_err(cs->dev, "could not read firmware version.\n");
cs 1416 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = SEQ_NONE;
cs 1423 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev, "%s: resp_code %d in ConState %d!\n",
cs 1427 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev, "cause code %04x in connection state %d.\n",
cs 1447 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1453 drivers/staging/isdn/gigaset/ev-layer.c do_stop(cs);
cs 1456 drivers/staging/isdn/gigaset/ev-layer.c do_start(cs);
cs 1462 drivers/staging/isdn/gigaset/ev-layer.c cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
cs 1463 drivers/staging/isdn/gigaset/ev-layer.c cs->waiting = 0;
cs 1464 drivers/staging/isdn/gigaset/ev-layer.c wake_up(&cs->waitqueue);
cs 1468 drivers/staging/isdn/gigaset/ev-layer.c cs->cmd_result = -EINVAL;
cs 1469 drivers/staging/isdn/gigaset/ev-layer.c else if (cs->gotfwver != 1) {
cs 1470 drivers/staging/isdn/gigaset/ev-layer.c cs->cmd_result = -ENOENT;
cs 1472 drivers/staging/isdn/gigaset/ev-layer.c memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
cs 1473 drivers/staging/isdn/gigaset/ev-layer.c cs->cmd_result = 0;
cs 1475 drivers/staging/isdn/gigaset/ev-layer.c cs->waiting = 0;
cs 1476 drivers/staging/isdn/gigaset/ev-layer.c wake_up(&cs->waitqueue);
cs 1482 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 1483 drivers/staging/isdn/gigaset/ev-layer.c if (ev->parameter != cs->cidmode) {
cs 1484 drivers/staging/isdn/gigaset/ev-layer.c cs->cidmode = ev->parameter;
cs 1486 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands |= PC_CIDMODE;
cs 1489 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands |= PC_UMMODE;
cs 1492 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1494 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 1495 drivers/staging/isdn/gigaset/ev-layer.c cs->waiting = 0;
cs 1496 drivers/staging/isdn/gigaset/ev-layer.c wake_up(&cs->waitqueue);
cs 1508 drivers/staging/isdn/gigaset/ev-layer.c do_shutdown(cs);
cs 1520 drivers/staging/isdn/gigaset/ev-layer.c dev_err(cs->dev, "%s: action==%d!\n", __func__, action);
cs 1525 drivers/staging/isdn/gigaset/ev-layer.c static void process_event(struct cardstate *cs, struct event_t *ev)
cs 1539 drivers/staging/isdn/gigaset/ev-layer.c at_state = at_state_from_cid(cs, ev->cid);
cs 1543 drivers/staging/isdn/gigaset/ev-layer.c gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
cs 1549 drivers/staging/isdn/gigaset/ev-layer.c if (at_state_invalid(cs, at_state)) {
cs 1564 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 1577 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 1601 drivers/staging/isdn/gigaset/ev-layer.c dev_warn(cs->dev, "%s: rcode=RSP_LAST: "
cs 1620 drivers/staging/isdn/gigaset/ev-layer.c do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
cs 1632 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 1635 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 1636 drivers/staging/isdn/gigaset/ev-layer.c gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
cs 1640 drivers/staging/isdn/gigaset/ev-layer.c if (cs->connected)
cs 1641 drivers/staging/isdn/gigaset/ev-layer.c send_command(cs, p_command, at_state);
cs 1643 drivers/staging/isdn/gigaset/ev-layer.c gigaset_add_event(cs, at_state, RSP_NODEV,
cs 1647 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 1656 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 1660 drivers/staging/isdn/gigaset/ev-layer.c static void schedule_sequence(struct cardstate *cs,
cs 1663 drivers/staging/isdn/gigaset/ev-layer.c cs->cur_at_seq = sequence;
cs 1664 drivers/staging/isdn/gigaset/ev-layer.c gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL);
cs 1667 drivers/staging/isdn/gigaset/ev-layer.c static void process_command_flags(struct cardstate *cs)
cs 1675 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 0;
cs 1677 drivers/staging/isdn/gigaset/ev-layer.c if (cs->cur_at_seq) {
cs 1687 drivers/staging/isdn/gigaset/ev-layer.c if (cs->at_state.pending_commands & PC_SHUTDOWN) {
cs 1688 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands &= ~PC_CIDMODE;
cs 1689 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i) {
cs 1690 drivers/staging/isdn/gigaset/ev-layer.c bcs = cs->bcs + i;
cs 1704 drivers/staging/isdn/gigaset/ev-layer.c if (cs->at_state.pending_commands & PC_INIT) {
cs 1705 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands &= ~PC_CIDMODE;
cs 1706 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i) {
cs 1707 drivers/staging/isdn/gigaset/ev-layer.c bcs = cs->bcs + i;
cs 1713 drivers/staging/isdn/gigaset/ev-layer.c if (cs->mstate == MS_RECOVER) {
cs 1724 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->lock, flags);
cs 1725 drivers/staging/isdn/gigaset/ev-layer.c if (cs->at_state.pending_commands == PC_UMMODE
cs 1726 drivers/staging/isdn/gigaset/ev-layer.c && !cs->cidmode
cs 1727 drivers/staging/isdn/gigaset/ev-layer.c && list_empty(&cs->temp_at_states)
cs 1728 drivers/staging/isdn/gigaset/ev-layer.c && cs->mode == M_CID) {
cs 1730 drivers/staging/isdn/gigaset/ev-layer.c at_state = &cs->at_state;
cs 1731 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i) {
cs 1732 drivers/staging/isdn/gigaset/ev-layer.c bcs = cs->bcs + i;
cs 1740 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->lock, flags);
cs 1741 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands &= ~PC_UMMODE;
cs 1743 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, at_state, sequence);
cs 1747 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i) {
cs 1748 drivers/staging/isdn/gigaset/ev-layer.c bcs = cs->bcs + i;
cs 1750 drivers/staging/isdn/gigaset/ev-layer.c if (cs->dle) {
cs 1751 drivers/staging/isdn/gigaset/ev-layer.c cs->curchannel = bcs->channel;
cs 1752 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
cs 1761 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &bcs->at_state, SEQ_HUP);
cs 1767 drivers/staging/isdn/gigaset/ev-layer.c cs->curchannel = bcs->channel;
cs 1768 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &cs->at_state, SEQ_NOCID);
cs 1772 drivers/staging/isdn/gigaset/ev-layer.c cs->curchannel = bcs->channel;
cs 1773 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
cs 1778 drivers/staging/isdn/gigaset/ev-layer.c list_for_each_entry(at_state, &cs->temp_at_states, list)
cs 1781 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, at_state, SEQ_HUP);
cs 1785 drivers/staging/isdn/gigaset/ev-layer.c if (cs->at_state.pending_commands & PC_INIT) {
cs 1786 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands &= ~PC_INIT;
cs 1787 drivers/staging/isdn/gigaset/ev-layer.c cs->dle = 0;
cs 1788 drivers/staging/isdn/gigaset/ev-layer.c cs->inbuf->inputstate = INS_command;
cs 1789 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &cs->at_state, SEQ_INIT);
cs 1792 drivers/staging/isdn/gigaset/ev-layer.c if (cs->at_state.pending_commands & PC_SHUTDOWN) {
cs 1793 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands &= ~PC_SHUTDOWN;
cs 1794 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN);
cs 1797 drivers/staging/isdn/gigaset/ev-layer.c if (cs->at_state.pending_commands & PC_CIDMODE) {
cs 1798 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands &= ~PC_CIDMODE;
cs 1799 drivers/staging/isdn/gigaset/ev-layer.c if (cs->mode == M_UNIMODEM) {
cs 1800 drivers/staging/isdn/gigaset/ev-layer.c cs->retry_count = 1;
cs 1801 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
cs 1806 drivers/staging/isdn/gigaset/ev-layer.c for (i = 0; i < cs->channels; ++i) {
cs 1807 drivers/staging/isdn/gigaset/ev-layer.c bcs = cs->bcs + i;
cs 1810 drivers/staging/isdn/gigaset/ev-layer.c cs->curchannel = bcs->channel;
cs 1811 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &cs->at_state, SEQ_DLE1);
cs 1816 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT);
cs 1821 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &bcs->at_state, SEQ_DIAL);
cs 1825 drivers/staging/isdn/gigaset/ev-layer.c switch (cs->mode) {
cs 1827 drivers/staging/isdn/gigaset/ev-layer.c cs->at_state.pending_commands |= PC_CIDMODE;
cs 1829 drivers/staging/isdn/gigaset/ev-layer.c cs->commands_pending = 1;
cs 1832 drivers/staging/isdn/gigaset/ev-layer.c schedule_init(cs, MS_INIT);
cs 1836 drivers/staging/isdn/gigaset/ev-layer.c cs->curchannel = bcs->channel;
cs 1837 drivers/staging/isdn/gigaset/ev-layer.c cs->retry_count = 2;
cs 1838 drivers/staging/isdn/gigaset/ev-layer.c schedule_sequence(cs, &cs->at_state, SEQ_CID);
cs 1844 drivers/staging/isdn/gigaset/ev-layer.c static void process_events(struct cardstate *cs)
cs 1853 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->ev_lock, flags);
cs 1854 drivers/staging/isdn/gigaset/ev-layer.c head = cs->ev_head;
cs 1857 drivers/staging/isdn/gigaset/ev-layer.c tail = cs->ev_tail;
cs 1859 drivers/staging/isdn/gigaset/ev-layer.c if (!check_flags && !cs->commands_pending)
cs 1862 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->ev_lock, flags);
cs 1863 drivers/staging/isdn/gigaset/ev-layer.c process_command_flags(cs);
cs 1864 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->ev_lock, flags);
cs 1865 drivers/staging/isdn/gigaset/ev-layer.c tail = cs->ev_tail;
cs 1867 drivers/staging/isdn/gigaset/ev-layer.c if (!cs->commands_pending)
cs 1873 drivers/staging/isdn/gigaset/ev-layer.c ev = cs->events + head;
cs 1874 drivers/staging/isdn/gigaset/ev-layer.c was_busy = cs->cur_at_seq != SEQ_NONE;
cs 1875 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->ev_lock, flags);
cs 1876 drivers/staging/isdn/gigaset/ev-layer.c process_event(cs, ev);
cs 1877 drivers/staging/isdn/gigaset/ev-layer.c spin_lock_irqsave(&cs->ev_lock, flags);
cs 1880 drivers/staging/isdn/gigaset/ev-layer.c if (was_busy && cs->cur_at_seq == SEQ_NONE)
cs 1884 drivers/staging/isdn/gigaset/ev-layer.c cs->ev_head = head;
cs 1887 drivers/staging/isdn/gigaset/ev-layer.c spin_unlock_irqrestore(&cs->ev_lock, flags);
cs 1890 drivers/staging/isdn/gigaset/ev-layer.c dev_err(cs->dev,
cs 1901 drivers/staging/isdn/gigaset/ev-layer.c struct cardstate *cs = (struct cardstate *) data;
cs 1904 drivers/staging/isdn/gigaset/ev-layer.c if (cs->inbuf->head != cs->inbuf->tail) {
cs 1906 drivers/staging/isdn/gigaset/ev-layer.c cs->ops->handle_input(cs->inbuf);
cs 1909 drivers/staging/isdn/gigaset/ev-layer.c process_events(cs);
cs 278 drivers/staging/isdn/gigaset/gigaset.h struct cardstate *cs;
cs 345 drivers/staging/isdn/gigaset/gigaset.h struct cardstate *cs;
cs 377 drivers/staging/isdn/gigaset/gigaset.h struct cardstate *cs;
cs 511 drivers/staging/isdn/gigaset/gigaset.h struct cardstate *cs;
cs 567 drivers/staging/isdn/gigaset/gigaset.h int (*write_cmd)(struct cardstate *cs, struct cmdbuf_t *cb);
cs 570 drivers/staging/isdn/gigaset/gigaset.h int (*write_room)(struct cardstate *cs);
cs 571 drivers/staging/isdn/gigaset/gigaset.h int (*chars_in_buffer)(struct cardstate *cs);
cs 572 drivers/staging/isdn/gigaset/gigaset.h int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]);
cs 592 drivers/staging/isdn/gigaset/gigaset.h int (*initcshw)(struct cardstate *cs);
cs 595 drivers/staging/isdn/gigaset/gigaset.h void (*freecshw)(struct cardstate *cs);
cs 599 drivers/staging/isdn/gigaset/gigaset.h int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state,
cs 601 drivers/staging/isdn/gigaset/gigaset.h int (*baud_rate)(struct cardstate *cs, unsigned cflag);
cs 602 drivers/staging/isdn/gigaset/gigaset.h int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
cs 669 drivers/staging/isdn/gigaset/gigaset.h int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid);
cs 670 drivers/staging/isdn/gigaset/gigaset.h void gigaset_isdn_unregdev(struct cardstate *cs);
cs 678 drivers/staging/isdn/gigaset/gigaset.h void gigaset_isdn_start(struct cardstate *cs);
cs 679 drivers/staging/isdn/gigaset/gigaset.h void gigaset_isdn_stop(struct cardstate *cs);
cs 695 drivers/staging/isdn/gigaset/gigaset.h void gigaset_handle_modem_response(struct cardstate *cs);
cs 702 drivers/staging/isdn/gigaset/gigaset.h void gigaset_init_dev_sysfs(struct cardstate *cs);
cs 703 drivers/staging/isdn/gigaset/gigaset.h void gigaset_free_dev_sysfs(struct cardstate *cs);
cs 711 drivers/staging/isdn/gigaset/gigaset.h struct cardstate *cs, int cid);
cs 713 drivers/staging/isdn/gigaset/gigaset.h struct bc_state *gigaset_get_free_channel(struct cardstate *cs);
cs 715 drivers/staging/isdn/gigaset/gigaset.h int gigaset_get_channels(struct cardstate *cs);
cs 716 drivers/staging/isdn/gigaset/gigaset.h void gigaset_free_channels(struct cardstate *cs);
cs 717 drivers/staging/isdn/gigaset/gigaset.h void gigaset_block_channels(struct cardstate *cs);
cs 740 drivers/staging/isdn/gigaset/gigaset.h void gigaset_freecs(struct cardstate *cs);
cs 743 drivers/staging/isdn/gigaset/gigaset.h int gigaset_start(struct cardstate *cs);
cs 746 drivers/staging/isdn/gigaset/gigaset.h void gigaset_stop(struct cardstate *cs);
cs 749 drivers/staging/isdn/gigaset/gigaset.h int gigaset_shutdown(struct cardstate *cs);
cs 755 drivers/staging/isdn/gigaset/gigaset.h struct event_t *gigaset_add_event(struct cardstate *cs,
cs 760 drivers/staging/isdn/gigaset/gigaset.h int gigaset_enterconfigmode(struct cardstate *cs);
cs 763 drivers/staging/isdn/gigaset/gigaset.h static inline void gigaset_schedule_event(struct cardstate *cs)
cs 766 drivers/staging/isdn/gigaset/gigaset.h spin_lock_irqsave(&cs->lock, flags);
cs 767 drivers/staging/isdn/gigaset/gigaset.h if (cs->running)
cs 768 drivers/staging/isdn/gigaset/gigaset.h tasklet_schedule(&cs->event_tasklet);
cs 769 drivers/staging/isdn/gigaset/gigaset.h spin_unlock_irqrestore(&cs->lock, flags);
cs 776 drivers/staging/isdn/gigaset/gigaset.h gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL);
cs 777 drivers/staging/isdn/gigaset/gigaset.h gigaset_schedule_event(bcs->cs);
cs 784 drivers/staging/isdn/gigaset/gigaset.h gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL);
cs 785 drivers/staging/isdn/gigaset/gigaset.h gigaset_schedule_event(bcs->cs);
cs 791 drivers/staging/isdn/gigaset/gigaset.h struct cardstate *cs = bcs->cs;
cs 792 drivers/staging/isdn/gigaset/gigaset.h unsigned short hw_hdr_len = cs->hw_hdr_len;
cs 799 drivers/staging/isdn/gigaset/gigaset.h dev_warn(cs->dev, "could not allocate skb\n");
cs 820 drivers/staging/isdn/gigaset/gigaset.h void gigaset_if_init(struct cardstate *cs);
cs 822 drivers/staging/isdn/gigaset/gigaset.h void gigaset_if_free(struct cardstate *cs);
cs 824 drivers/staging/isdn/gigaset/gigaset.h void gigaset_if_receive(struct cardstate *cs,
cs 18 drivers/staging/isdn/gigaset/interface.c static int if_lock(struct cardstate *cs, int *arg)
cs 22 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd);
cs 28 drivers/staging/isdn/gigaset/interface.c *arg = cs->mstate == MS_LOCKED;
cs 32 drivers/staging/isdn/gigaset/interface.c if (!cmd && cs->mstate == MS_LOCKED && cs->connected) {
cs 33 drivers/staging/isdn/gigaset/interface.c cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS);
cs 34 drivers/staging/isdn/gigaset/interface.c cs->ops->baud_rate(cs, B115200);
cs 35 drivers/staging/isdn/gigaset/interface.c cs->ops->set_line_ctrl(cs, CS8);
cs 36 drivers/staging/isdn/gigaset/interface.c cs->control_state = TIOCM_DTR | TIOCM_RTS;
cs 39 drivers/staging/isdn/gigaset/interface.c cs->waiting = 1;
cs 40 drivers/staging/isdn/gigaset/interface.c if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK,
cs 42 drivers/staging/isdn/gigaset/interface.c cs->waiting = 0;
cs 45 drivers/staging/isdn/gigaset/interface.c gigaset_schedule_event(cs);
cs 47 drivers/staging/isdn/gigaset/interface.c wait_event(cs->waitqueue, !cs->waiting);
cs 49 drivers/staging/isdn/gigaset/interface.c if (cs->cmd_result >= 0) {
cs 50 drivers/staging/isdn/gigaset/interface.c *arg = cs->cmd_result;
cs 54 drivers/staging/isdn/gigaset/interface.c return cs->cmd_result;
cs 57 drivers/staging/isdn/gigaset/interface.c static int if_version(struct cardstate *cs, unsigned arg[4])
cs 63 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd);
cs 73 drivers/staging/isdn/gigaset/interface.c cs->waiting = 1;
cs 74 drivers/staging/isdn/gigaset/interface.c if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER,
cs 76 drivers/staging/isdn/gigaset/interface.c cs->waiting = 0;
cs 79 drivers/staging/isdn/gigaset/interface.c gigaset_schedule_event(cs);
cs 81 drivers/staging/isdn/gigaset/interface.c wait_event(cs->waitqueue, !cs->waiting);
cs 83 drivers/staging/isdn/gigaset/interface.c if (cs->cmd_result >= 0)
cs 86 drivers/staging/isdn/gigaset/interface.c return cs->cmd_result;
cs 92 drivers/staging/isdn/gigaset/interface.c static int if_config(struct cardstate *cs, int *arg)
cs 94 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg);
cs 99 drivers/staging/isdn/gigaset/interface.c if (cs->mstate != MS_LOCKED)
cs 102 drivers/staging/isdn/gigaset/interface.c if (!cs->connected) {
cs 108 drivers/staging/isdn/gigaset/interface.c return gigaset_enterconfigmode(cs);
cs 115 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs;
cs 120 drivers/staging/isdn/gigaset/interface.c cs = gigaset_get_cs_by_tty(tty);
cs 121 drivers/staging/isdn/gigaset/interface.c if (!cs || !try_module_get(cs->driver->owner))
cs 124 drivers/staging/isdn/gigaset/interface.c if (mutex_lock_interruptible(&cs->mutex)) {
cs 125 drivers/staging/isdn/gigaset/interface.c module_put(cs->driver->owner);
cs 128 drivers/staging/isdn/gigaset/interface.c tty->driver_data = cs;
cs 130 drivers/staging/isdn/gigaset/interface.c ++cs->port.count;
cs 132 drivers/staging/isdn/gigaset/interface.c if (cs->port.count == 1) {
cs 133 drivers/staging/isdn/gigaset/interface.c tty_port_tty_set(&cs->port, tty);
cs 134 drivers/staging/isdn/gigaset/interface.c cs->port.low_latency = 1;
cs 137 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 143 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 145 drivers/staging/isdn/gigaset/interface.c if (!cs) { /* happens if we didn't find cs in open */
cs 150 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
cs 152 drivers/staging/isdn/gigaset/interface.c mutex_lock(&cs->mutex);
cs 154 drivers/staging/isdn/gigaset/interface.c if (!cs->connected)
cs 156 drivers/staging/isdn/gigaset/interface.c else if (!cs->port.count)
cs 157 drivers/staging/isdn/gigaset/interface.c dev_warn(cs->dev, "%s: device not opened\n", __func__);
cs 158 drivers/staging/isdn/gigaset/interface.c else if (!--cs->port.count)
cs 159 drivers/staging/isdn/gigaset/interface.c tty_port_tty_set(&cs->port, NULL);
cs 161 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 163 drivers/staging/isdn/gigaset/interface.c module_put(cs->driver->owner);
cs 169 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 175 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
cs 177 drivers/staging/isdn/gigaset/interface.c if (mutex_lock_interruptible(&cs->mutex))
cs 180 drivers/staging/isdn/gigaset/interface.c if (!cs->connected) {
cs 189 drivers/staging/isdn/gigaset/interface.c retval = if_lock(cs, &int_arg);
cs 196 drivers/staging/isdn/gigaset/interface.c retval = if_config(cs, &int_arg);
cs 207 drivers/staging/isdn/gigaset/interface.c retval = cs->ops->brkchars(cs, buf);
cs 215 drivers/staging/isdn/gigaset/interface.c retval = if_version(cs, version);
cs 228 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 243 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 246 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
cs 248 drivers/staging/isdn/gigaset/interface.c if (mutex_lock_interruptible(&cs->mutex))
cs 251 drivers/staging/isdn/gigaset/interface.c retval = cs->control_state & (TIOCM_RTS | TIOCM_DTR);
cs 253 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 261 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 266 drivers/staging/isdn/gigaset/interface.c cs->minor_index, __func__, set, clear);
cs 268 drivers/staging/isdn/gigaset/interface.c if (mutex_lock_interruptible(&cs->mutex))
cs 271 drivers/staging/isdn/gigaset/interface.c if (!cs->connected) {
cs 275 drivers/staging/isdn/gigaset/interface.c mc = (cs->control_state | set) & ~clear & (TIOCM_RTS | TIOCM_DTR);
cs 276 drivers/staging/isdn/gigaset/interface.c retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc);
cs 277 drivers/staging/isdn/gigaset/interface.c cs->control_state = mc;
cs 280 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 287 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 291 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
cs 293 drivers/staging/isdn/gigaset/interface.c if (mutex_lock_interruptible(&cs->mutex))
cs 296 drivers/staging/isdn/gigaset/interface.c if (!cs->connected) {
cs 301 drivers/staging/isdn/gigaset/interface.c if (cs->mstate != MS_LOCKED) {
cs 302 drivers/staging/isdn/gigaset/interface.c dev_warn(cs->dev, "can't write to unlocked device\n");
cs 314 drivers/staging/isdn/gigaset/interface.c dev_err(cs->dev, "%s: out of memory\n", __func__);
cs 323 drivers/staging/isdn/gigaset/interface.c cb->wake_tasklet = &cs->if_wake_tasklet;
cs 324 drivers/staging/isdn/gigaset/interface.c retval = cs->ops->write_cmd(cs, cb);
cs 326 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 332 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 335 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
cs 337 drivers/staging/isdn/gigaset/interface.c if (mutex_lock_interruptible(&cs->mutex))
cs 340 drivers/staging/isdn/gigaset/interface.c if (!cs->connected) {
cs 343 drivers/staging/isdn/gigaset/interface.c } else if (cs->mstate != MS_LOCKED) {
cs 344 drivers/staging/isdn/gigaset/interface.c dev_warn(cs->dev, "can't write to unlocked device\n");
cs 347 drivers/staging/isdn/gigaset/interface.c retval = cs->ops->write_room(cs);
cs 349 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 356 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 359 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
cs 361 drivers/staging/isdn/gigaset/interface.c mutex_lock(&cs->mutex);
cs 363 drivers/staging/isdn/gigaset/interface.c if (!cs->connected)
cs 365 drivers/staging/isdn/gigaset/interface.c else if (cs->mstate != MS_LOCKED)
cs 366 drivers/staging/isdn/gigaset/interface.c dev_warn(cs->dev, "can't write to unlocked device\n");
cs 368 drivers/staging/isdn/gigaset/interface.c retval = cs->ops->chars_in_buffer(cs);
cs 370 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 377 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 379 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
cs 381 drivers/staging/isdn/gigaset/interface.c mutex_lock(&cs->mutex);
cs 383 drivers/staging/isdn/gigaset/interface.c if (!cs->connected)
cs 388 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 393 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 395 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
cs 397 drivers/staging/isdn/gigaset/interface.c mutex_lock(&cs->mutex);
cs 399 drivers/staging/isdn/gigaset/interface.c if (!cs->connected)
cs 404 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 409 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = tty->driver_data;
cs 415 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
cs 417 drivers/staging/isdn/gigaset/interface.c mutex_lock(&cs->mutex);
cs 419 drivers/staging/isdn/gigaset/interface.c if (!cs->connected) {
cs 428 drivers/staging/isdn/gigaset/interface.c cs->minor_index, iflag, cflag, old_cflag);
cs 431 drivers/staging/isdn/gigaset/interface.c control_state = cs->control_state;
cs 447 drivers/staging/isdn/gigaset/interface.c cs->minor_index,
cs 449 drivers/staging/isdn/gigaset/interface.c cs->ops->set_modem_ctrl(cs, control_state, new_state);
cs 453 drivers/staging/isdn/gigaset/interface.c cs->ops->baud_rate(cs, cflag & CBAUD);
cs 457 drivers/staging/isdn/gigaset/interface.c gig_dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index);
cs 459 drivers/staging/isdn/gigaset/interface.c cs->ops->set_modem_ctrl(cs, control_state, new_state);
cs 467 drivers/staging/isdn/gigaset/interface.c cs->ops->set_line_ctrl(cs, cflag);
cs 470 drivers/staging/isdn/gigaset/interface.c cs->control_state = control_state;
cs 473 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 497 drivers/staging/isdn/gigaset/interface.c struct cardstate *cs = (struct cardstate *)data;
cs 499 drivers/staging/isdn/gigaset/interface.c tty_port_tty_wakeup(&cs->port);
cs 504 drivers/staging/isdn/gigaset/interface.c void gigaset_if_init(struct cardstate *cs)
cs 508 drivers/staging/isdn/gigaset/interface.c drv = cs->driver;
cs 512 drivers/staging/isdn/gigaset/interface.c tasklet_init(&cs->if_wake_tasklet, if_wake, (unsigned long) cs);
cs 514 drivers/staging/isdn/gigaset/interface.c mutex_lock(&cs->mutex);
cs 515 drivers/staging/isdn/gigaset/interface.c cs->tty_dev = tty_port_register_device(&cs->port, drv->tty,
cs 516 drivers/staging/isdn/gigaset/interface.c cs->minor_index, NULL);
cs 518 drivers/staging/isdn/gigaset/interface.c if (!IS_ERR(cs->tty_dev))
cs 519 drivers/staging/isdn/gigaset/interface.c dev_set_drvdata(cs->tty_dev, cs);
cs 522 drivers/staging/isdn/gigaset/interface.c cs->tty_dev = NULL;
cs 524 drivers/staging/isdn/gigaset/interface.c mutex_unlock(&cs->mutex);
cs 527 drivers/staging/isdn/gigaset/interface.c void gigaset_if_free(struct cardstate *cs)
cs 531 drivers/staging/isdn/gigaset/interface.c drv = cs->driver;
cs 535 drivers/staging/isdn/gigaset/interface.c tasklet_disable(&cs->if_wake_tasklet);
cs 536 drivers/staging/isdn/gigaset/interface.c tasklet_kill(&cs->if_wake_tasklet);
cs 537 drivers/staging/isdn/gigaset/interface.c cs->tty_dev = NULL;
cs 538 drivers/staging/isdn/gigaset/interface.c tty_unregister_device(drv->tty, cs->minor_index);
cs 550 drivers/staging/isdn/gigaset/interface.c void gigaset_if_receive(struct cardstate *cs,
cs 553 drivers/staging/isdn/gigaset/interface.c tty_insert_flip_string(&cs->port, buffer, len);
cs 554 drivers/staging/isdn/gigaset/interface.c tty_flip_buffer_push(&cs->port);
cs 505 drivers/staging/isdn/gigaset/isocdata.c dev_warn(bcs->cs->dev, "received oversized packet discarded\n");
cs 534 drivers/staging/isdn/gigaset/isocdata.c struct cardstate *cs = bcs->cs;
cs 549 drivers/staging/isdn/gigaset/isocdata.c dev_notice(cs->dev, "received short frame (%d octets)\n",
cs 555 drivers/staging/isdn/gigaset/isocdata.c dev_notice(cs->dev, "frame check error\n");
cs 583 drivers/staging/isdn/gigaset/isocdata.c dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits);
cs 893 drivers/staging/isdn/gigaset/isocdata.c struct cardstate *cs = inbuf->cs;
cs 894 drivers/staging/isdn/gigaset/isocdata.c unsigned cbytes = cs->cbytes;
cs 901 drivers/staging/isdn/gigaset/isocdata.c if (cbytes == 0 && cs->respdata[0] == '\r') {
cs 903 drivers/staging/isdn/gigaset/isocdata.c cs->respdata[0] = 0;
cs 910 drivers/staging/isdn/gigaset/isocdata.c dev_warn(cs->dev, "response too large (%d)\n",
cs 914 drivers/staging/isdn/gigaset/isocdata.c cs->cbytes = cbytes;
cs 916 drivers/staging/isdn/gigaset/isocdata.c cbytes, cs->respdata);
cs 917 drivers/staging/isdn/gigaset/isocdata.c gigaset_handle_modem_response(cs);
cs 921 drivers/staging/isdn/gigaset/isocdata.c cs->respdata[0] = c;
cs 926 drivers/staging/isdn/gigaset/isocdata.c cs->respdata[cbytes] = c;
cs 932 drivers/staging/isdn/gigaset/isocdata.c cs->cbytes = cbytes;
cs 940 drivers/staging/isdn/gigaset/isocdata.c struct cardstate *cs = inbuf->cs;
cs 953 drivers/staging/isdn/gigaset/isocdata.c if (cs->mstate == MS_LOCKED) {
cs 956 drivers/staging/isdn/gigaset/isocdata.c gigaset_if_receive(inbuf->cs, src, numbytes);
cs 991 drivers/staging/isdn/gigaset/isocdata.c spin_lock_irqsave(&bcs->cs->lock, flags);
cs 992 drivers/staging/isdn/gigaset/isocdata.c if (!bcs->cs->connected) {
cs 993 drivers/staging/isdn/gigaset/isocdata.c spin_unlock_irqrestore(&bcs->cs->lock, flags);
cs 1003 drivers/staging/isdn/gigaset/isocdata.c spin_unlock_irqrestore(&bcs->cs->lock, flags);
cs 18 drivers/staging/isdn/gigaset/proc.c struct cardstate *cs = dev_get_drvdata(dev);
cs 20 drivers/staging/isdn/gigaset/proc.c return sprintf(buf, "%u\n", cs->cidmode);
cs 26 drivers/staging/isdn/gigaset/proc.c struct cardstate *cs = dev_get_drvdata(dev);
cs 37 drivers/staging/isdn/gigaset/proc.c if (mutex_lock_interruptible(&cs->mutex))
cs 40 drivers/staging/isdn/gigaset/proc.c cs->waiting = 1;
cs 41 drivers/staging/isdn/gigaset/proc.c if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
cs 43 drivers/staging/isdn/gigaset/proc.c cs->waiting = 0;
cs 44 drivers/staging/isdn/gigaset/proc.c mutex_unlock(&cs->mutex);
cs 47 drivers/staging/isdn/gigaset/proc.c gigaset_schedule_event(cs);
cs 49 drivers/staging/isdn/gigaset/proc.c wait_event(cs->waitqueue, !cs->waiting);
cs 51 drivers/staging/isdn/gigaset/proc.c mutex_unlock(&cs->mutex);
cs 59 drivers/staging/isdn/gigaset/proc.c void gigaset_free_dev_sysfs(struct cardstate *cs)
cs 61 drivers/staging/isdn/gigaset/proc.c if (!cs->tty_dev)
cs 65 drivers/staging/isdn/gigaset/proc.c device_remove_file(cs->tty_dev, &dev_attr_cidmode);
cs 69 drivers/staging/isdn/gigaset/proc.c void gigaset_init_dev_sysfs(struct cardstate *cs)
cs 71 drivers/staging/isdn/gigaset/proc.c if (!cs->tty_dev)
cs 75 drivers/staging/isdn/gigaset/proc.c if (device_create_file(cs->tty_dev, &dev_attr_cidmode))
cs 60 drivers/staging/isdn/gigaset/ser-gigaset.c static int write_modem(struct cardstate *cs)
cs 62 drivers/staging/isdn/gigaset/ser-gigaset.c struct tty_struct *tty = cs->hw.ser->tty;
cs 63 drivers/staging/isdn/gigaset/ser-gigaset.c struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
cs 81 drivers/staging/isdn/gigaset/ser-gigaset.c flush_send_queue(cs);
cs 101 drivers/staging/isdn/gigaset/ser-gigaset.c static int send_cb(struct cardstate *cs)
cs 103 drivers/staging/isdn/gigaset/ser-gigaset.c struct tty_struct *tty = cs->hw.ser->tty;
cs 110 drivers/staging/isdn/gigaset/ser-gigaset.c cb = cs->cmdbuf;
cs 120 drivers/staging/isdn/gigaset/ser-gigaset.c flush_send_queue(cs);
cs 126 drivers/staging/isdn/gigaset/ser-gigaset.c sent, cb->len, cs->cmdbytes);
cs 130 drivers/staging/isdn/gigaset/ser-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags);
cs 131 drivers/staging/isdn/gigaset/ser-gigaset.c cs->cmdbytes -= cs->curlen;
cs 133 drivers/staging/isdn/gigaset/ser-gigaset.c cs->cmdbuf = cb = cb->next;
cs 136 drivers/staging/isdn/gigaset/ser-gigaset.c cs->curlen = cb->len;
cs 138 drivers/staging/isdn/gigaset/ser-gigaset.c cs->lastcmdbuf = NULL;
cs 139 drivers/staging/isdn/gigaset/ser-gigaset.c cs->curlen = 0;
cs 141 drivers/staging/isdn/gigaset/ser-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags);
cs 158 drivers/staging/isdn/gigaset/ser-gigaset.c struct cardstate *cs = (struct cardstate *) data;
cs 163 drivers/staging/isdn/gigaset/ser-gigaset.c if (!cs) {
cs 167 drivers/staging/isdn/gigaset/ser-gigaset.c bcs = cs->bcs;
cs 174 drivers/staging/isdn/gigaset/ser-gigaset.c sent = send_cb(cs);
cs 193 drivers/staging/isdn/gigaset/ser-gigaset.c if (write_modem(cs) < 0)
cs 200 drivers/staging/isdn/gigaset/ser-gigaset.c static void flush_send_queue(struct cardstate *cs)
cs 207 drivers/staging/isdn/gigaset/ser-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags);
cs 208 drivers/staging/isdn/gigaset/ser-gigaset.c while ((cb = cs->cmdbuf) != NULL) {
cs 209 drivers/staging/isdn/gigaset/ser-gigaset.c cs->cmdbuf = cb->next;
cs 214 drivers/staging/isdn/gigaset/ser-gigaset.c cs->cmdbuf = cs->lastcmdbuf = NULL;
cs 215 drivers/staging/isdn/gigaset/ser-gigaset.c cs->cmdbytes = cs->curlen = 0;
cs 216 drivers/staging/isdn/gigaset/ser-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags);
cs 219 drivers/staging/isdn/gigaset/ser-gigaset.c if (cs->bcs->tx_skb)
cs 220 drivers/staging/isdn/gigaset/ser-gigaset.c dev_kfree_skb_any(cs->bcs->tx_skb);
cs 221 drivers/staging/isdn/gigaset/ser-gigaset.c while ((skb = skb_dequeue(&cs->bcs->squeue)) != NULL)
cs 239 drivers/staging/isdn/gigaset/ser-gigaset.c static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
cs 243 drivers/staging/isdn/gigaset/ser-gigaset.c gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
cs 247 drivers/staging/isdn/gigaset/ser-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags);
cs 248 drivers/staging/isdn/gigaset/ser-gigaset.c cb->prev = cs->lastcmdbuf;
cs 249 drivers/staging/isdn/gigaset/ser-gigaset.c if (cs->lastcmdbuf)
cs 250 drivers/staging/isdn/gigaset/ser-gigaset.c cs->lastcmdbuf->next = cb;
cs 252 drivers/staging/isdn/gigaset/ser-gigaset.c cs->cmdbuf = cb;
cs 253 drivers/staging/isdn/gigaset/ser-gigaset.c cs->curlen = cb->len;
cs 255 drivers/staging/isdn/gigaset/ser-gigaset.c cs->cmdbytes += cb->len;
cs 256 drivers/staging/isdn/gigaset/ser-gigaset.c cs->lastcmdbuf = cb;
cs 257 drivers/staging/isdn/gigaset/ser-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags);
cs 259 drivers/staging/isdn/gigaset/ser-gigaset.c spin_lock_irqsave(&cs->lock, flags);
cs 260 drivers/staging/isdn/gigaset/ser-gigaset.c if (cs->connected)
cs 261 drivers/staging/isdn/gigaset/ser-gigaset.c tasklet_schedule(&cs->write_tasklet);
cs 262 drivers/staging/isdn/gigaset/ser-gigaset.c spin_unlock_irqrestore(&cs->lock, flags);
cs 274 drivers/staging/isdn/gigaset/ser-gigaset.c static int gigaset_write_room(struct cardstate *cs)
cs 278 drivers/staging/isdn/gigaset/ser-gigaset.c bytes = cs->cmdbytes;
cs 290 drivers/staging/isdn/gigaset/ser-gigaset.c static int gigaset_chars_in_buffer(struct cardstate *cs)
cs 292 drivers/staging/isdn/gigaset/ser-gigaset.c return cs->cmdbytes;
cs 302 drivers/staging/isdn/gigaset/ser-gigaset.c static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
cs 363 drivers/staging/isdn/gigaset/ser-gigaset.c static void gigaset_freecshw(struct cardstate *cs)
cs 365 drivers/staging/isdn/gigaset/ser-gigaset.c tasklet_kill(&cs->write_tasklet);
cs 366 drivers/staging/isdn/gigaset/ser-gigaset.c if (!cs->hw.ser)
cs 368 drivers/staging/isdn/gigaset/ser-gigaset.c platform_device_unregister(&cs->hw.ser->dev);
cs 380 drivers/staging/isdn/gigaset/ser-gigaset.c static int gigaset_initcshw(struct cardstate *cs)
cs 390 drivers/staging/isdn/gigaset/ser-gigaset.c cs->hw.ser = scs;
cs 392 drivers/staging/isdn/gigaset/ser-gigaset.c cs->hw.ser->dev.name = GIGASET_MODULENAME;
cs 393 drivers/staging/isdn/gigaset/ser-gigaset.c cs->hw.ser->dev.id = cs->minor_index;
cs 394 drivers/staging/isdn/gigaset/ser-gigaset.c cs->hw.ser->dev.dev.release = gigaset_device_release;
cs 395 drivers/staging/isdn/gigaset/ser-gigaset.c rc = platform_device_register(&cs->hw.ser->dev);
cs 398 drivers/staging/isdn/gigaset/ser-gigaset.c kfree(cs->hw.ser);
cs 399 drivers/staging/isdn/gigaset/ser-gigaset.c cs->hw.ser = NULL;
cs 403 drivers/staging/isdn/gigaset/ser-gigaset.c tasklet_init(&cs->write_tasklet,
cs 404 drivers/staging/isdn/gigaset/ser-gigaset.c gigaset_modem_fill, (unsigned long) cs);
cs 416 drivers/staging/isdn/gigaset/ser-gigaset.c static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
cs 419 drivers/staging/isdn/gigaset/ser-gigaset.c struct tty_struct *tty = cs->hw.ser->tty;
cs 434 drivers/staging/isdn/gigaset/ser-gigaset.c static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
cs 439 drivers/staging/isdn/gigaset/ser-gigaset.c static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
cs 470 drivers/staging/isdn/gigaset/ser-gigaset.c struct cardstate *cs = tty->disc_data;
cs 472 drivers/staging/isdn/gigaset/ser-gigaset.c if (!cs || !cs->hw.ser) {
cs 476 drivers/staging/isdn/gigaset/ser-gigaset.c atomic_inc(&cs->hw.ser->refcnt);
cs 477 drivers/staging/isdn/gigaset/ser-gigaset.c return cs;
cs 480 drivers/staging/isdn/gigaset/ser-gigaset.c static void cs_put(struct cardstate *cs)
cs 482 drivers/staging/isdn/gigaset/ser-gigaset.c if (atomic_dec_and_test(&cs->hw.ser->refcnt))
cs 483 drivers/staging/isdn/gigaset/ser-gigaset.c complete(&cs->hw.ser->dead_cmp);
cs 493 drivers/staging/isdn/gigaset/ser-gigaset.c struct cardstate *cs;
cs 506 drivers/staging/isdn/gigaset/ser-gigaset.c cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
cs 507 drivers/staging/isdn/gigaset/ser-gigaset.c if (!cs) {
cs 512 drivers/staging/isdn/gigaset/ser-gigaset.c cs->dev = &cs->hw.ser->dev.dev;
cs 513 drivers/staging/isdn/gigaset/ser-gigaset.c cs->hw.ser->tty = tty;
cs 514 drivers/staging/isdn/gigaset/ser-gigaset.c atomic_set(&cs->hw.ser->refcnt, 1);
cs 515 drivers/staging/isdn/gigaset/ser-gigaset.c init_completion(&cs->hw.ser->dead_cmp);
cs 516 drivers/staging/isdn/gigaset/ser-gigaset.c tty->disc_data = cs;
cs 532 drivers/staging/isdn/gigaset/ser-gigaset.c cs->mstate = MS_LOCKED;
cs 533 drivers/staging/isdn/gigaset/ser-gigaset.c rc = gigaset_start(cs);
cs 535 drivers/staging/isdn/gigaset/ser-gigaset.c tasklet_kill(&cs->write_tasklet);
cs 545 drivers/staging/isdn/gigaset/ser-gigaset.c gigaset_freecs(cs);
cs 556 drivers/staging/isdn/gigaset/ser-gigaset.c struct cardstate *cs = tty->disc_data;
cs 560 drivers/staging/isdn/gigaset/ser-gigaset.c if (!cs) {
cs 568 drivers/staging/isdn/gigaset/ser-gigaset.c if (!cs->hw.ser)
cs 572 drivers/staging/isdn/gigaset/ser-gigaset.c if (!atomic_dec_and_test(&cs->hw.ser->refcnt))
cs 573 drivers/staging/isdn/gigaset/ser-gigaset.c wait_for_completion(&cs->hw.ser->dead_cmp);
cs 577 drivers/staging/isdn/gigaset/ser-gigaset.c gigaset_stop(cs);
cs 578 drivers/staging/isdn/gigaset/ser-gigaset.c tasklet_kill(&cs->write_tasklet);
cs 579 drivers/staging/isdn/gigaset/ser-gigaset.c flush_send_queue(cs);
cs 580 drivers/staging/isdn/gigaset/ser-gigaset.c cs->dev = NULL;
cs 581 drivers/staging/isdn/gigaset/ser-gigaset.c gigaset_freecs(cs);
cs 607 drivers/staging/isdn/gigaset/ser-gigaset.c struct cardstate *cs = cs_get(tty);
cs 611 drivers/staging/isdn/gigaset/ser-gigaset.c if (!cs)
cs 630 drivers/staging/isdn/gigaset/ser-gigaset.c flush_send_queue(cs);
cs 640 drivers/staging/isdn/gigaset/ser-gigaset.c cs_put(cs);
cs 660 drivers/staging/isdn/gigaset/ser-gigaset.c struct cardstate *cs = cs_get(tty);
cs 664 drivers/staging/isdn/gigaset/ser-gigaset.c if (!cs)
cs 666 drivers/staging/isdn/gigaset/ser-gigaset.c inbuf = cs->inbuf;
cs 668 drivers/staging/isdn/gigaset/ser-gigaset.c dev_err(cs->dev, "%s: no inbuf\n", __func__);
cs 669 drivers/staging/isdn/gigaset/ser-gigaset.c cs_put(cs);
cs 691 drivers/staging/isdn/gigaset/ser-gigaset.c dev_err(cs->dev,
cs 705 drivers/staging/isdn/gigaset/ser-gigaset.c gigaset_schedule_event(cs);
cs 706 drivers/staging/isdn/gigaset/ser-gigaset.c cs_put(cs);
cs 715 drivers/staging/isdn/gigaset/ser-gigaset.c struct cardstate *cs = cs_get(tty);
cs 718 drivers/staging/isdn/gigaset/ser-gigaset.c if (!cs)
cs 720 drivers/staging/isdn/gigaset/ser-gigaset.c tasklet_schedule(&cs->write_tasklet);
cs 721 drivers/staging/isdn/gigaset/ser-gigaset.c cs_put(cs);
cs 151 drivers/staging/isdn/gigaset/usb-gigaset.c static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
cs 154 drivers/staging/isdn/gigaset/usb-gigaset.c struct usb_device *udev = cs->hw.usb->udev;
cs 175 drivers/staging/isdn/gigaset/usb-gigaset.c static int set_value(struct cardstate *cs, u8 req, u16 val)
cs 177 drivers/staging/isdn/gigaset/usb-gigaset.c struct usb_device *udev = cs->hw.usb->udev;
cs 197 drivers/staging/isdn/gigaset/usb-gigaset.c 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/);
cs 208 drivers/staging/isdn/gigaset/usb-gigaset.c static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
cs 228 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "unsupported baudrate request 0x%x,"
cs 234 drivers/staging/isdn/gigaset/usb-gigaset.c return set_value(cs, 1, val);
cs 241 drivers/staging/isdn/gigaset/usb-gigaset.c static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
cs 260 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "CSIZE was not CS5-CS8, using default of 8\n");
cs 273 drivers/staging/isdn/gigaset/usb-gigaset.c return set_value(cs, 3, val);
cs 292 drivers/staging/isdn/gigaset/usb-gigaset.c static int write_modem(struct cardstate *cs);
cs 293 drivers/staging/isdn/gigaset/usb-gigaset.c static int send_cb(struct cardstate *cs);
cs 301 drivers/staging/isdn/gigaset/usb-gigaset.c struct cardstate *cs = (struct cardstate *) data;
cs 302 drivers/staging/isdn/gigaset/usb-gigaset.c struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
cs 306 drivers/staging/isdn/gigaset/usb-gigaset.c if (cs->hw.usb->busy) {
cs 313 drivers/staging/isdn/gigaset/usb-gigaset.c if (cs->cmdbuf) { /* commands to send? */
cs 315 drivers/staging/isdn/gigaset/usb-gigaset.c if (send_cb(cs) < 0) {
cs 333 drivers/staging/isdn/gigaset/usb-gigaset.c if (write_modem(cs) < 0) {
cs 344 drivers/staging/isdn/gigaset/usb-gigaset.c struct cardstate *cs = urb->context;
cs 345 drivers/staging/isdn/gigaset/usb-gigaset.c struct inbuf_t *inbuf = cs->inbuf;
cs 356 drivers/staging/isdn/gigaset/usb-gigaset.c src = cs->hw.usb->rcvbuf;
cs 358 drivers/staging/isdn/gigaset/usb-gigaset.c dev_warn(cs->dev,
cs 365 drivers/staging/isdn/gigaset/usb-gigaset.c gigaset_schedule_event(inbuf->cs);
cs 379 drivers/staging/isdn/gigaset/usb-gigaset.c spin_lock_irqsave(&cs->lock, flags);
cs 380 drivers/staging/isdn/gigaset/usb-gigaset.c if (!cs->connected) {
cs 381 drivers/staging/isdn/gigaset/usb-gigaset.c spin_unlock_irqrestore(&cs->lock, flags);
cs 386 drivers/staging/isdn/gigaset/usb-gigaset.c spin_unlock_irqrestore(&cs->lock, flags);
cs 388 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "error %d resubmitting URB\n", -r);
cs 395 drivers/staging/isdn/gigaset/usb-gigaset.c struct cardstate *cs = urb->context;
cs 404 drivers/staging/isdn/gigaset/usb-gigaset.c cs->hw.usb->busy = 0;
cs 407 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "bulk transfer failed (status %d)\n",
cs 413 drivers/staging/isdn/gigaset/usb-gigaset.c spin_lock_irqsave(&cs->lock, flags);
cs 414 drivers/staging/isdn/gigaset/usb-gigaset.c if (!cs->connected) {
cs 417 drivers/staging/isdn/gigaset/usb-gigaset.c cs->hw.usb->busy = 0;
cs 418 drivers/staging/isdn/gigaset/usb-gigaset.c tasklet_schedule(&cs->write_tasklet);
cs 420 drivers/staging/isdn/gigaset/usb-gigaset.c spin_unlock_irqrestore(&cs->lock, flags);
cs 423 drivers/staging/isdn/gigaset/usb-gigaset.c static int send_cb(struct cardstate *cs)
cs 425 drivers/staging/isdn/gigaset/usb-gigaset.c struct cmdbuf_t *cb = cs->cmdbuf;
cs 429 drivers/staging/isdn/gigaset/usb-gigaset.c struct usb_cardstate *ucs = cs->hw.usb;
cs 433 drivers/staging/isdn/gigaset/usb-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags);
cs 434 drivers/staging/isdn/gigaset/usb-gigaset.c cs->cmdbytes -= cs->curlen;
cs 436 drivers/staging/isdn/gigaset/usb-gigaset.c cs->curlen, cs->cmdbytes);
cs 437 drivers/staging/isdn/gigaset/usb-gigaset.c cs->cmdbuf = cb->next;
cs 438 drivers/staging/isdn/gigaset/usb-gigaset.c if (cs->cmdbuf) {
cs 439 drivers/staging/isdn/gigaset/usb-gigaset.c cs->cmdbuf->prev = NULL;
cs 440 drivers/staging/isdn/gigaset/usb-gigaset.c cs->curlen = cs->cmdbuf->len;
cs 442 drivers/staging/isdn/gigaset/usb-gigaset.c cs->lastcmdbuf = NULL;
cs 443 drivers/staging/isdn/gigaset/usb-gigaset.c cs->curlen = 0;
cs 445 drivers/staging/isdn/gigaset/usb-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags);
cs 451 drivers/staging/isdn/gigaset/usb-gigaset.c cb = cs->cmdbuf;
cs 462 drivers/staging/isdn/gigaset/usb-gigaset.c gigaset_write_bulk_callback, cs);
cs 468 drivers/staging/isdn/gigaset/usb-gigaset.c spin_lock_irqsave(&cs->lock, flags);
cs 469 drivers/staging/isdn/gigaset/usb-gigaset.c status = cs->connected ?
cs 472 drivers/staging/isdn/gigaset/usb-gigaset.c spin_unlock_irqrestore(&cs->lock, flags);
cs 476 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev,
cs 489 drivers/staging/isdn/gigaset/usb-gigaset.c static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
cs 494 drivers/staging/isdn/gigaset/usb-gigaset.c gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
cs 498 drivers/staging/isdn/gigaset/usb-gigaset.c spin_lock_irqsave(&cs->cmdlock, flags);
cs 499 drivers/staging/isdn/gigaset/usb-gigaset.c cb->prev = cs->lastcmdbuf;
cs 500 drivers/staging/isdn/gigaset/usb-gigaset.c if (cs->lastcmdbuf)
cs 501 drivers/staging/isdn/gigaset/usb-gigaset.c cs->lastcmdbuf->next = cb;
cs 503 drivers/staging/isdn/gigaset/usb-gigaset.c cs->cmdbuf = cb;
cs 504 drivers/staging/isdn/gigaset/usb-gigaset.c cs->curlen = cb->len;
cs 506 drivers/staging/isdn/gigaset/usb-gigaset.c cs->cmdbytes += cb->len;
cs 507 drivers/staging/isdn/gigaset/usb-gigaset.c cs->lastcmdbuf = cb;
cs 508 drivers/staging/isdn/gigaset/usb-gigaset.c spin_unlock_irqrestore(&cs->cmdlock, flags);
cs 510 drivers/staging/isdn/gigaset/usb-gigaset.c spin_lock_irqsave(&cs->lock, flags);
cs 512 drivers/staging/isdn/gigaset/usb-gigaset.c if (cs->connected)
cs 513 drivers/staging/isdn/gigaset/usb-gigaset.c tasklet_schedule(&cs->write_tasklet);
cs 514 drivers/staging/isdn/gigaset/usb-gigaset.c spin_unlock_irqrestore(&cs->lock, flags);
cs 518 drivers/staging/isdn/gigaset/usb-gigaset.c static int gigaset_write_room(struct cardstate *cs)
cs 522 drivers/staging/isdn/gigaset/usb-gigaset.c bytes = cs->cmdbytes;
cs 526 drivers/staging/isdn/gigaset/usb-gigaset.c static int gigaset_chars_in_buffer(struct cardstate *cs)
cs 528 drivers/staging/isdn/gigaset/usb-gigaset.c return cs->cmdbytes;
cs 536 drivers/staging/isdn/gigaset/usb-gigaset.c static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
cs 538 drivers/staging/isdn/gigaset/usb-gigaset.c struct usb_device *udev = cs->hw.usb->udev;
cs 541 drivers/staging/isdn/gigaset/usb-gigaset.c memcpy(cs->hw.usb->bchars, buf, 6);
cs 564 drivers/staging/isdn/gigaset/usb-gigaset.c static void gigaset_freecshw(struct cardstate *cs)
cs 566 drivers/staging/isdn/gigaset/usb-gigaset.c tasklet_kill(&cs->write_tasklet);
cs 567 drivers/staging/isdn/gigaset/usb-gigaset.c kfree(cs->hw.usb);
cs 570 drivers/staging/isdn/gigaset/usb-gigaset.c static int gigaset_initcshw(struct cardstate *cs)
cs 574 drivers/staging/isdn/gigaset/usb-gigaset.c cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
cs 586 drivers/staging/isdn/gigaset/usb-gigaset.c tasklet_init(&cs->write_tasklet,
cs 587 drivers/staging/isdn/gigaset/usb-gigaset.c gigaset_modem_fill, (unsigned long) cs);
cs 593 drivers/staging/isdn/gigaset/usb-gigaset.c static int write_modem(struct cardstate *cs)
cs 597 drivers/staging/isdn/gigaset/usb-gigaset.c struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
cs 598 drivers/staging/isdn/gigaset/usb-gigaset.c struct usb_cardstate *ucs = cs->hw.usb;
cs 616 drivers/staging/isdn/gigaset/usb-gigaset.c spin_lock_irqsave(&cs->lock, flags);
cs 617 drivers/staging/isdn/gigaset/usb-gigaset.c if (cs->connected) {
cs 622 drivers/staging/isdn/gigaset/usb-gigaset.c gigaset_write_bulk_callback, cs);
cs 627 drivers/staging/isdn/gigaset/usb-gigaset.c spin_unlock_irqrestore(&cs->lock, flags);
cs 630 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "could not submit urb (error %d)\n", -ret);
cs 653 drivers/staging/isdn/gigaset/usb-gigaset.c struct cardstate *cs = NULL;
cs 692 drivers/staging/isdn/gigaset/usb-gigaset.c cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
cs 693 drivers/staging/isdn/gigaset/usb-gigaset.c if (!cs)
cs 695 drivers/staging/isdn/gigaset/usb-gigaset.c ucs = cs->hw.usb;
cs 701 drivers/staging/isdn/gigaset/usb-gigaset.c cs->dev = &interface->dev;
cs 704 drivers/staging/isdn/gigaset/usb-gigaset.c usb_set_intfdata(interface,
cs); cs 719 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "Couldn't allocate bulk_out_buffer\n"); cs 726 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n"); cs 743 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "No free urbs available\n"); cs 751 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "Couldn't allocate rcvbuf\n"); cs 760 drivers/staging/isdn/gigaset/usb-gigaset.c cs, endpoint->bInterval); cs 764 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval); cs 770 drivers/staging/isdn/gigaset/usb-gigaset.c cs->mstate = MS_LOCKED; cs 772 drivers/staging/isdn/gigaset/usb-gigaset.c retval = gigaset_start(cs); cs 774 drivers/staging/isdn/gigaset/usb-gigaset.c tasklet_kill(&cs->write_tasklet); cs 791 drivers/staging/isdn/gigaset/usb-gigaset.c gigaset_freecs(cs); cs 797 drivers/staging/isdn/gigaset/usb-gigaset.c struct cardstate *cs; cs 800 drivers/staging/isdn/gigaset/usb-gigaset.c cs = usb_get_intfdata(interface); cs 801 drivers/staging/isdn/gigaset/usb-gigaset.c ucs = cs->hw.usb; cs 803 drivers/staging/isdn/gigaset/usb-gigaset.c dev_info(cs->dev, "disconnecting Gigaset USB adapter\n"); cs 807 drivers/staging/isdn/gigaset/usb-gigaset.c gigaset_stop(cs); cs 810 drivers/staging/isdn/gigaset/usb-gigaset.c tasklet_kill(&cs->write_tasklet); cs 824 drivers/staging/isdn/gigaset/usb-gigaset.c cs->dev = NULL; cs 825 drivers/staging/isdn/gigaset/usb-gigaset.c gigaset_freecs(cs); cs 833 drivers/staging/isdn/gigaset/usb-gigaset.c struct cardstate *cs = usb_get_intfdata(intf); cs 836 drivers/staging/isdn/gigaset/usb-gigaset.c cs->connected = 0; /* prevent rescheduling */ cs 837 drivers/staging/isdn/gigaset/usb-gigaset.c usb_kill_urb(cs->hw.usb->read_urb); cs 838 drivers/staging/isdn/gigaset/usb-gigaset.c tasklet_kill(&cs->write_tasklet); cs 839 drivers/staging/isdn/gigaset/usb-gigaset.c usb_kill_urb(cs->hw.usb->bulk_out_urb); cs 850 drivers/staging/isdn/gigaset/usb-gigaset.c struct cardstate *cs = usb_get_intfdata(intf); cs 854 drivers/staging/isdn/gigaset/usb-gigaset.c cs->connected = 1; cs 855 drivers/staging/isdn/gigaset/usb-gigaset.c rc = usb_submit_urb(cs->hw.usb->read_urb, GFP_KERNEL); cs 857 drivers/staging/isdn/gigaset/usb-gigaset.c dev_err(cs->dev, "Could not submit read URB (error %d)\n", -rc); cs 939 drivers/staging/isdn/gigaset/usb-gigaset.c gigaset_shutdown(driver->cs + i); cs 125 drivers/staging/kpc2000/kpc2000_spi.c unsigned int cs : 4; /* Chip Select */ cs 162 drivers/staging/kpc2000/kpc2000_spi.c kp_spi_read_reg(struct kp_spi_controller_state *cs, int idx) cs 164 drivers/staging/kpc2000/kpc2000_spi.c u64 __iomem *addr = cs->base; cs 168 drivers/staging/kpc2000/kpc2000_spi.c if ((idx == KP_SPI_REG_CONFIG) && (cs->conf_cache >= 0)) cs 169 drivers/staging/kpc2000/kpc2000_spi.c return cs->conf_cache; cs 176 drivers/staging/kpc2000/kpc2000_spi.c kp_spi_write_reg(struct kp_spi_controller_state *cs, int idx, u64 val) cs 178 drivers/staging/kpc2000/kpc2000_spi.c u64 __iomem *addr = cs->base; cs 183 drivers/staging/kpc2000/kpc2000_spi.c cs->conf_cache = val; cs 187 drivers/staging/kpc2000/kpc2000_spi.c kp_spi_wait_for_reg_bit(struct kp_spi_controller_state *cs, int idx, cs 193 drivers/staging/kpc2000/kpc2000_spi.c while (!(kp_spi_read_reg(cs, idx) & bit)) { cs 195 drivers/staging/kpc2000/kpc2000_spi.c if (!(kp_spi_read_reg(cs, idx) & bit)) cs 208 drivers/staging/kpc2000/kpc2000_spi.c struct kp_spi_controller_state *cs = spidev->controller_state; cs 222 
drivers/staging/kpc2000/kpc2000_spi.c res = kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS, cs 227 drivers/staging/kpc2000/kpc2000_spi.c kp_spi_write_reg(cs, KP_SPI_REG_TXDATA, val); cs 235 drivers/staging/kpc2000/kpc2000_spi.c kp_spi_write_reg(cs, KP_SPI_REG_TXDATA, 0x00); cs 236 drivers/staging/kpc2000/kpc2000_spi.c res = kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS, cs 241 drivers/staging/kpc2000/kpc2000_spi.c test = kp_spi_read_reg(cs, KP_SPI_REG_RXDATA); cs 247 drivers/staging/kpc2000/kpc2000_spi.c if (kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS, cs 265 drivers/staging/kpc2000/kpc2000_spi.c struct kp_spi_controller_state *cs; cs 268 drivers/staging/kpc2000/kpc2000_spi.c cs = spidev->controller_state; cs 269 drivers/staging/kpc2000/kpc2000_spi.c if (!cs) { cs 270 drivers/staging/kpc2000/kpc2000_spi.c cs = kzalloc(sizeof(*cs), GFP_KERNEL); cs 271 drivers/staging/kpc2000/kpc2000_spi.c if (!cs) cs 273 drivers/staging/kpc2000/kpc2000_spi.c cs->base = kpspi->base; cs 274 drivers/staging/kpc2000/kpc2000_spi.c cs->conf_cache = -1; cs 275 drivers/staging/kpc2000/kpc2000_spi.c spidev->controller_state = cs; cs 280 drivers/staging/kpc2000/kpc2000_spi.c sc.bitfield.cs = spidev->chip_select; cs 291 drivers/staging/kpc2000/kpc2000_spi.c struct kp_spi_controller_state *cs; cs 303 drivers/staging/kpc2000/kpc2000_spi.c cs = spidev->controller_state; cs 337 drivers/staging/kpc2000/kpc2000_spi.c sc.reg = kp_spi_read_reg(cs, KP_SPI_REG_CONFIG); cs 339 drivers/staging/kpc2000/kpc2000_spi.c kp_spi_write_reg(cs, KP_SPI_REG_CONFIG, sc.reg); cs 342 drivers/staging/kpc2000/kpc2000_spi.c if (kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS, cs 362 drivers/staging/kpc2000/kpc2000_spi.c sc.reg = kp_spi_read_reg(cs, KP_SPI_REG_CONFIG); cs 376 drivers/staging/kpc2000/kpc2000_spi.c sc.bitfield.cs = spidev->chip_select; cs 379 drivers/staging/kpc2000/kpc2000_spi.c kp_spi_write_reg(cs, KP_SPI_REG_CONFIG, sc.reg); cs 396 drivers/staging/kpc2000/kpc2000_spi.c sc.reg = kp_spi_read_reg(cs, KP_SPI_REG_CONFIG); cs 398 drivers/staging/kpc2000/kpc2000_spi.c kp_spi_write_reg(cs, KP_SPI_REG_CONFIG, sc.reg); cs 413 drivers/staging/kpc2000/kpc2000_spi.c struct kp_spi_controller_state *cs = spidev->controller_state; cs 415 drivers/staging/kpc2000/kpc2000_spi.c kfree(cs); cs 466 drivers/staging/media/imx/imx-ic-prpencvf.c incc->cs, cs 468 drivers/staging/media/imx/imx-ic-prpencvf.c outcc->cs); cs 595 drivers/staging/media/imx/imx-ic-prpencvf.c incc->cs, cs 597 drivers/staging/media/imx/imx-ic-prpencvf.c outcc->cs); cs 172 drivers/staging/media/imx/imx-media-capture.c u32 cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ? cs 216 drivers/staging/media/imx/imx-media-capture.c cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ? cs 554 drivers/staging/media/imx/imx-media-capture.c priv->vdev.cc->cs != cc->cs || cs 1258 drivers/staging/media/imx/imx-media-csi.c u32 cs_sel = (incc->cs == IPUV3_COLORSPACE_YUV) ? cs 1445 drivers/staging/media/imx/imx-media-csi.c u32 cs_sel = (incc->cs == IPUV3_COLORSPACE_YUV) ? 
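
The kpc2000_spi entries just above show a small read-through register cache: kp_spi_read_reg() short-circuits reads of KP_SPI_REG_CONFIG whenever conf_cache holds a non-negative value, kp_spi_write_reg() refreshes the cache on every config write, and setup initializes conf_cache to -1. A self-contained sketch of that caching idea follows, with a plain array standing in for the MMIO region; reg_read/reg_write and the register indices are illustrative names, not the driver's API:

/*
 * Sketch of the conf_cache pattern from the kpc2000_spi entries above:
 * reads of the CONFIG register are served from a cached copy when one
 * is valid; writes refresh the cache. The regs[] array stands in for
 * the device's MMIO window (cs->base in the driver).
 */
#include <stdint.h>
#include <stdio.h>

enum { REG_CONFIG = 0, REG_STATUS = 1, REG_COUNT = 2 };

struct ctrl_state {
	uint64_t regs[REG_COUNT]; /* stand-in for the MMIO registers */
	int64_t conf_cache;       /* -1 = invalid, >= 0 = cached CONFIG */
};

static uint64_t reg_read(struct ctrl_state *cs, int idx)
{
	/* Serve CONFIG from the cache when it holds a valid value. */
	if (idx == REG_CONFIG && cs->conf_cache >= 0)
		return (uint64_t)cs->conf_cache;
	return cs->regs[idx]; /* otherwise touch the "hardware" */
}

static void reg_write(struct ctrl_state *cs, int idx, uint64_t val)
{
	cs->regs[idx] = val;
	if (idx == REG_CONFIG)
		cs->conf_cache = (int64_t)val; /* keep the cache coherent */
}

int main(void)
{
	struct ctrl_state cs = { .conf_cache = -1 }; /* starts invalid */

	reg_write(&cs, REG_CONFIG, 0x2a);
	printf("config = 0x%llx (served from cache)\n",
	       (unsigned long long)reg_read(&cs, REG_CONFIG));
	printf("status = 0x%llx (real read)\n",
	       (unsigned long long)reg_read(&cs, REG_STATUS));
	return 0;
}

One consequence of using -1 as the invalid sentinel, visible in the driver's `conf_cache >= 0` test, is that a config value with the top bit set would never be served from the cache; every such read falls through to the hardware.
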
cs 24 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_YUV, cs 32 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_YUV, cs 41 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_YUV, cs 46 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_YUV, cs 51 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_YUV, cs 56 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_YUV, cs 61 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_YUV, cs 75 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 84 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 89 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 97 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 103 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 109 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 115 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 126 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 137 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 148 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 159 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 165 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 174 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 184 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 188 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 201 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_YUV, cs 213 drivers/staging/media/imx/imx-media-utils.c .cs = IPUV3_COLORSPACE_RGB, cs 224 drivers/staging/media/imx/imx-media-utils.c mbus->colorspace = (fmt->cs == IPUV3_COLORSPACE_RGB) ? 
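
The long run of `.cs = IPUV3_COLORSPACE_YUV` / `.cs = IPUV3_COLORSPACE_RGB` initializers above is a static pixel-format table in imx-media-utils.c; helpers elsewhere in that file then branch on `fmt->cs`, for example to choose a V4L2 colorspace (the entries at lines 224 and 229 above). Here is a minimal sketch of a table keyed that way; the enum, the fourcc strings, and find_fmt are illustrative stand-ins, not the media driver's types:

/*
 * Sketch of the format-table pattern from the imx-media-utils entries
 * above: each entry records which IPU colorspace a format belongs to,
 * and callers branch on .cs the way the driver does when mapping to a
 * V4L2 colorspace.
 */
#include <stdio.h>
#include <string.h>

enum ipu_cs { CS_YUV, CS_RGB };

struct fmt_info {
	const char *fourcc;
	enum ipu_cs cs;
};

static const struct fmt_info fmt_table[] = {
	{ "YUYV", CS_YUV },
	{ "NV12", CS_YUV },
	{ "RGBP", CS_RGB }, /* RGB565 */
	{ "XR24", CS_RGB }, /* XRGB8888 */
};

static const struct fmt_info *find_fmt(const char *fourcc)
{
	for (size_t i = 0; i < sizeof(fmt_table) / sizeof(fmt_table[0]); i++)
		if (!strcmp(fmt_table[i].fourcc, fourcc))
			return &fmt_table[i];
	return NULL;
}

int main(void)
{
	const struct fmt_info *fmt = find_fmt("NV12");

	/* Branch on .cs, as the driver does for mbus->colorspace. */
	if (fmt)
		printf("%s is a %s format\n", fmt->fourcc,
		       fmt->cs == CS_RGB ? "RGB" : "YUV");
	return 0;
}

Keeping the colorspace in the table, rather than deriving it from the fourcc at each call site, is what lets the driver write single-expression selections like `(fmt->cs == IPUV3_COLORSPACE_RGB) ? ... : ...` throughout the entries that follow.
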
cs 229 drivers/staging/media/imx/imx-media-utils.c V4L2_MAP_QUANTIZATION_DEFAULT(fmt->cs == IPUV3_COLORSPACE_RGB, cs 533 drivers/staging/media/imx/imx-media-utils.c if (cc && cc->cs == IPUV3_COLORSPACE_RGB) cs 594 drivers/staging/media/imx/imx-media-utils.c if (cc->ipufmt && cc->cs == IPUV3_COLORSPACE_YUV) { cs 76 drivers/staging/media/imx/imx-media.h enum ipu_color_space cs; cs 199 drivers/thunderbolt/icm.c static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, cs 208 drivers/thunderbolt/icm.c cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; cs 220 drivers/thunderbolt/icm.c static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, cs 231 drivers/thunderbolt/icm.c cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; cs 3403 drivers/tty/cyclades.c const struct zfile_config *c, *cs; cs 3412 drivers/tty/cyclades.c cs = ptr + h->config_offset; cs 3415 drivers/tty/cyclades.c if ((void *)(cs + h->n_config) > ptr + len || cs 3433 drivers/tty/cyclades.c for (c = cs; c < cs + h->n_config; c++) { cs 3454 drivers/tty/cyclades.c for (c = cs; c < cs + h->n_config; c++) cs 2181 drivers/tty/n_tty.c unsigned char cs; cs 2185 drivers/tty/n_tty.c cs = tty->link->ctrl_status; cs 2188 drivers/tty/n_tty.c if (put_user(cs, b)) { cs 233 drivers/tty/serial/ar933x_uart.c unsigned int cs; cs 244 drivers/tty/serial/ar933x_uart.c cs = 0; cs 247 drivers/tty/serial/ar933x_uart.c cs |= AR933X_UART_CS_PARITY_EVEN; cs 249 drivers/tty/serial/ar933x_uart.c cs |= AR933X_UART_CS_PARITY_ODD; cs 251 drivers/tty/serial/ar933x_uart.c cs |= AR933X_UART_CS_PARITY_NONE; cs 283 drivers/tty/serial/ar933x_uart.c ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_PARITY_M, cs); cs 3429 drivers/tty/tty_io.c struct console *cs[16]; cs 3442 drivers/tty/tty_io.c cs[i++] = c; cs 3443 drivers/tty/tty_io.c if (i >= ARRAY_SIZE(cs)) cs 3447 drivers/tty/tty_io.c int index = cs[i]->index; cs 3448 drivers/tty/tty_io.c struct tty_driver *drv = cs[i]->device(cs[i], &index); cs 3451 drivers/tty/tty_io.c if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR)) cs 3455 drivers/tty/tty_io.c cs[i]->name, cs[i]->index); cs 269 drivers/usb/cdns3/gadget.c static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg) cs 274 drivers/usb/cdns3/gadget.c *cs ^= 1; cs 289 drivers/usb/gadget/function/f_uac1_legacy.c struct list_head cs; cs 393 drivers/usb/gadget/function/f_uac1_legacy.c struct usb_audio_control_selector *cs; cs 399 drivers/usb/gadget/function/f_uac1_legacy.c list_for_each_entry(cs, &audio->cs, list) { cs 400 drivers/usb/gadget/function/f_uac1_legacy.c if (cs->id == id) { cs 401 drivers/usb/gadget/function/f_uac1_legacy.c list_for_each_entry(con, &cs->control, list) { cs 430 drivers/usb/gadget/function/f_uac1_legacy.c struct usb_audio_control_selector *cs; cs 436 drivers/usb/gadget/function/f_uac1_legacy.c list_for_each_entry(cs, &audio->cs, list) { cs 437 drivers/usb/gadget/function/f_uac1_legacy.c if (cs->id == id) { cs 438 drivers/usb/gadget/function/f_uac1_legacy.c list_for_each_entry(con, &cs->control, list) { cs 786 drivers/usb/gadget/function/f_uac1_legacy.c INIT_LIST_HEAD(&audio->cs); cs 787 drivers/usb/gadget/function/f_uac1_legacy.c list_add(&feature_unit.list, &audio->cs); cs 154 drivers/usb/host/ehci-orion.c const struct mbus_dram_window *cs = dram->cs + i; cs 156 drivers/usb/host/ehci-orion.c wrl(USB_WINDOW_CTRL(i), ((cs->size - 1) & 0xffff0000) | cs 157 drivers/usb/host/ehci-orion.c (cs->mbus_attr << 8) | cs 159 drivers/usb/host/ehci-orion.c wrl(USB_WINDOW_BASE(i), cs->base); cs 35 
drivers/usb/host/xhci-mvebu.c const struct mbus_dram_window *cs = dram->cs + win; cs 37 drivers/usb/host/xhci-mvebu.c writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | cs 41 drivers/usb/host/xhci-mvebu.c writel((cs->base & 0xffff0000), base + USB3_WIN_BASE(win)); cs 662 drivers/usb/serial/io_ti.c __u8 cs = 0; cs 665 drivers/usb/serial/io_ti.c cs = (__u8)(cs + buffer[i]); cs 667 drivers/usb/serial/io_ti.c if (cs != rom_desc->CheckSum) { cs 668 drivers/usb/serial/io_ti.c pr_debug("%s - Mismatch %x - %x", __func__, rom_desc->CheckSum, cs); cs 803 drivers/usb/serial/io_ti.c __u8 cs = 0; cs 847 drivers/usb/serial/io_ti.c cs = (__u8)(cs + buffer[i]); cs 858 drivers/usb/serial/io_ti.c i2c_header->CheckSum = cs; cs 1415 drivers/usb/serial/io_ti.c __u8 cs = 0; cs 1470 drivers/usb/serial/io_ti.c cs = (__u8)(cs + buffer[i]); cs 1478 drivers/usb/serial/io_ti.c header->CheckSum = cs; cs 1582 drivers/usb/serial/ti_usb_3410_5052.c u8 cs = 0; cs 1589 drivers/usb/serial/ti_usb_3410_5052.c cs = (u8)(cs + buffer[pos]); cs 1593 drivers/usb/serial/ti_usb_3410_5052.c header->bCheckSum = cs; cs 304 drivers/video/fbdev/core/svgalib.c u8 cs = 0x0d; cs 320 drivers/video/fbdev/core/svgalib.c cs = 0x0d; cs 323 drivers/video/fbdev/core/svgalib.c cs = 0x09; cs 326 drivers/video/fbdev/core/svgalib.c cs = 0x07; cs 329 drivers/video/fbdev/core/svgalib.c cs = 0x05; cs 332 drivers/video/fbdev/core/svgalib.c cs = 0x01; cs 341 drivers/video/fbdev/core/svgalib.c vga_wcrt(regbase, 0x0A, cs); /* set cursor start and enable it */ cs 311 drivers/video/fbdev/metronomefb.c u16 cs; cs 321 drivers/video/fbdev/metronomefb.c opcode = cs = 0xCC41; cs 323 drivers/video/fbdev/metronomefb.c opcode = cs = 0xCC40; cs 330 drivers/video/fbdev/metronomefb.c cs += par->metromem_cmd->args[i++]; cs 335 drivers/video/fbdev/metronomefb.c par->metromem_cmd->csum = cs; cs 344 drivers/video/fbdev/metronomefb.c u16 cs; cs 348 drivers/video/fbdev/metronomefb.c cs = par->metromem_cmd->opcode; cs 353 drivers/video/fbdev/metronomefb.c cs += par->metromem_cmd->args[i]; cs 360 drivers/video/fbdev/metronomefb.c par->metromem_cmd->csum = cs; cs 393 drivers/video/fbdev/metronomefb.c u16 cs; cs 400 drivers/video/fbdev/metronomefb.c cs = 0xCC20; cs 405 drivers/video/fbdev/metronomefb.c cs += par->metromem_cmd->args[i++]; cs 410 drivers/video/fbdev/metronomefb.c par->metromem_cmd->csum = cs; cs 222 drivers/w1/masters/omap_hdq.c u64 module_id, rn_le, cs, id; cs 234 drivers/w1/masters/omap_hdq.c cs = w1_calc_crc8((u8 *)&rn_le, 7); cs 235 drivers/w1/masters/omap_hdq.c id = (cs << 56) | module_id; cs 149 drivers/w1/slaves/w1_ds2805.c u8 cs; cs 193 drivers/w1/slaves/w1_ds2805.c cs = w1_read_8(sl->master); cs 194 drivers/w1/slaves/w1_ds2805.c if (cs != W1_F0D_CS_OK) { cs 195 drivers/w1/slaves/w1_ds2805.c dev_err(&sl->dev, "save to eeprom failed = CS=%#x\n", cs); cs 4030 fs/btrfs/tree-log.c u64 ds, dl, cs, cl; cs 4039 fs/btrfs/tree-log.c cs = btrfs_file_extent_offset(src, extent); cs 4044 fs/btrfs/tree-log.c cs = 0; cs 4050 fs/btrfs/tree-log.c ds + cs, ds + cs + cl - 1, cs 194 fs/dcache.c static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) cs 199 fs/dcache.c a = read_word_at_a_time(cs); cs 205 fs/dcache.c cs += sizeof(unsigned long); cs 217 fs/dcache.c static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) cs 220 fs/dcache.c if (*cs != *ct) cs 222 fs/dcache.c cs++; cs 249 fs/dcache.c const unsigned char *cs = READ_ONCE(dentry->d_name.name); cs 251 fs/dcache.c 
return dentry_string_cmp(cs, ct, tcount); cs 156 fs/efs/super.c __be32 cs, *ui; cs 173 fs/efs/super.c cs = *ui--; cs 174 fs/efs/super.c csum += be32_to_cpu(cs); cs 168 fs/erofs/super.c const char *cs = match_strdup(args); cs 171 fs/erofs/super.c if (!cs) { cs 176 fs/erofs/super.c if (!strcmp(cs, "disabled")) { cs 178 fs/erofs/super.c } else if (!strcmp(cs, "readahead")) { cs 180 fs/erofs/super.c } else if (!strcmp(cs, "readaround")) { cs 183 fs/erofs/super.c erofs_err(sb, "Unrecognized cache strategy \"%s\"", cs); cs 186 fs/erofs/super.c kfree(cs); cs 651 fs/fuse/dev.c static void fuse_copy_init(struct fuse_copy_state *cs, int write, cs 654 fs/fuse/dev.c memset(cs, 0, sizeof(*cs)); cs 655 fs/fuse/dev.c cs->write = write; cs 656 fs/fuse/dev.c cs->iter = iter; cs 660 fs/fuse/dev.c static void fuse_copy_finish(struct fuse_copy_state *cs) cs 662 fs/fuse/dev.c if (cs->currbuf) { cs 663 fs/fuse/dev.c struct pipe_buffer *buf = cs->currbuf; cs 665 fs/fuse/dev.c if (cs->write) cs 666 fs/fuse/dev.c buf->len = PAGE_SIZE - cs->len; cs 667 fs/fuse/dev.c cs->currbuf = NULL; cs 668 fs/fuse/dev.c } else if (cs->pg) { cs 669 fs/fuse/dev.c if (cs->write) { cs 670 fs/fuse/dev.c flush_dcache_page(cs->pg); cs 671 fs/fuse/dev.c set_page_dirty_lock(cs->pg); cs 673 fs/fuse/dev.c put_page(cs->pg); cs 675 fs/fuse/dev.c cs->pg = NULL; cs 682 fs/fuse/dev.c static int fuse_copy_fill(struct fuse_copy_state *cs) cs 687 fs/fuse/dev.c err = unlock_request(cs->req); cs 691 fs/fuse/dev.c fuse_copy_finish(cs); cs 692 fs/fuse/dev.c if (cs->pipebufs) { cs 693 fs/fuse/dev.c struct pipe_buffer *buf = cs->pipebufs; cs 695 fs/fuse/dev.c if (!cs->write) { cs 696 fs/fuse/dev.c err = pipe_buf_confirm(cs->pipe, buf); cs 700 fs/fuse/dev.c BUG_ON(!cs->nr_segs); cs 701 fs/fuse/dev.c cs->currbuf = buf; cs 702 fs/fuse/dev.c cs->pg = buf->page; cs 703 fs/fuse/dev.c cs->offset = buf->offset; cs 704 fs/fuse/dev.c cs->len = buf->len; cs 705 fs/fuse/dev.c cs->pipebufs++; cs 706 fs/fuse/dev.c cs->nr_segs--; cs 708 fs/fuse/dev.c if (cs->nr_segs == cs->pipe->buffers) cs 719 fs/fuse/dev.c cs->currbuf = buf; cs 720 fs/fuse/dev.c cs->pg = page; cs 721 fs/fuse/dev.c cs->offset = 0; cs 722 fs/fuse/dev.c cs->len = PAGE_SIZE; cs 723 fs/fuse/dev.c cs->pipebufs++; cs 724 fs/fuse/dev.c cs->nr_segs++; cs 728 fs/fuse/dev.c err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off); cs 732 fs/fuse/dev.c cs->len = err; cs 733 fs/fuse/dev.c cs->offset = off; cs 734 fs/fuse/dev.c cs->pg = page; cs 735 fs/fuse/dev.c iov_iter_advance(cs->iter, err); cs 738 fs/fuse/dev.c return lock_request(cs->req); cs 742 fs/fuse/dev.c static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) cs 744 fs/fuse/dev.c unsigned ncpy = min(*size, cs->len); cs 746 fs/fuse/dev.c void *pgaddr = kmap_atomic(cs->pg); cs 747 fs/fuse/dev.c void *buf = pgaddr + cs->offset; cs 749 fs/fuse/dev.c if (cs->write) cs 758 fs/fuse/dev.c cs->len -= ncpy; cs 759 fs/fuse/dev.c cs->offset += ncpy; cs 782 fs/fuse/dev.c static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) cs 787 fs/fuse/dev.c struct pipe_buffer *buf = cs->pipebufs; cs 789 fs/fuse/dev.c err = unlock_request(cs->req); cs 793 fs/fuse/dev.c fuse_copy_finish(cs); cs 795 fs/fuse/dev.c err = pipe_buf_confirm(cs->pipe, buf); cs 799 fs/fuse/dev.c BUG_ON(!cs->nr_segs); cs 800 fs/fuse/dev.c cs->currbuf = buf; cs 801 fs/fuse/dev.c cs->len = buf->len; cs 802 fs/fuse/dev.c cs->pipebufs++; cs 803 fs/fuse/dev.c cs->nr_segs--; cs 805 fs/fuse/dev.c if (cs->len != PAGE_SIZE) cs 808 fs/fuse/dev.c if 
(pipe_buf_steal(cs->pipe, buf) != 0) cs 846 fs/fuse/dev.c spin_lock(&cs->req->waitq.lock); cs 847 fs/fuse/dev.c if (test_bit(FR_ABORTED, &cs->req->flags)) cs 851 fs/fuse/dev.c spin_unlock(&cs->req->waitq.lock); cs 861 fs/fuse/dev.c cs->len = 0; cs 868 fs/fuse/dev.c cs->pg = buf->page; cs 869 fs/fuse/dev.c cs->offset = buf->offset; cs 871 fs/fuse/dev.c err = lock_request(cs->req); cs 878 fs/fuse/dev.c static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, cs 884 fs/fuse/dev.c if (cs->nr_segs == cs->pipe->buffers) cs 887 fs/fuse/dev.c err = unlock_request(cs->req); cs 891 fs/fuse/dev.c fuse_copy_finish(cs); cs 893 fs/fuse/dev.c buf = cs->pipebufs; cs 899 fs/fuse/dev.c cs->pipebufs++; cs 900 fs/fuse/dev.c cs->nr_segs++; cs 901 fs/fuse/dev.c cs->len = 0; cs 910 fs/fuse/dev.c static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, cs 920 fs/fuse/dev.c if (cs->write && cs->pipebufs && page) { cs 921 fs/fuse/dev.c return fuse_ref_page(cs, page, offset, count); cs 922 fs/fuse/dev.c } else if (!cs->len) { cs 923 fs/fuse/dev.c if (cs->move_pages && page && cs 925 fs/fuse/dev.c err = fuse_try_move_page(cs, pagep); cs 929 fs/fuse/dev.c err = fuse_copy_fill(cs); cs 937 fs/fuse/dev.c offset += fuse_copy_do(cs, &buf, &count); cs 940 fs/fuse/dev.c offset += fuse_copy_do(cs, NULL, &count); cs 942 fs/fuse/dev.c if (page && !cs->write) cs 948 fs/fuse/dev.c static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, cs 952 fs/fuse/dev.c struct fuse_req *req = cs->req; cs 961 fs/fuse/dev.c err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing); cs 971 fs/fuse/dev.c static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size) cs 974 fs/fuse/dev.c if (!cs->len) { cs 975 fs/fuse/dev.c int err = fuse_copy_fill(cs); cs 979 fs/fuse/dev.c fuse_copy_do(cs, &val, &size); cs 985 fs/fuse/dev.c static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, cs 995 fs/fuse/dev.c err = fuse_copy_pages(cs, arg->size, zeroing); cs 997 fs/fuse/dev.c err = fuse_copy_one(cs, arg->value, arg->size); cs 1022 fs/fuse/dev.c struct fuse_copy_state *cs, cs 1043 fs/fuse/dev.c err = fuse_copy_one(cs, &ih, sizeof(ih)); cs 1045 fs/fuse/dev.c err = fuse_copy_one(cs, &arg, sizeof(arg)); cs 1046 fs/fuse/dev.c fuse_copy_finish(cs); cs 1075 fs/fuse/dev.c struct fuse_copy_state *cs, cs 1096 fs/fuse/dev.c err = fuse_copy_one(cs, &ih, sizeof(ih)); cs 1098 fs/fuse/dev.c err = fuse_copy_one(cs, &arg, sizeof(arg)); cs 1099 fs/fuse/dev.c fuse_copy_finish(cs); cs 1108 fs/fuse/dev.c struct fuse_copy_state *cs, size_t nbytes) cs 1133 fs/fuse/dev.c err = fuse_copy_one(cs, &ih, sizeof(ih)); cs 1135 fs/fuse/dev.c err = fuse_copy_one(cs, &arg, sizeof(arg)); cs 1141 fs/fuse/dev.c err = fuse_copy_one(cs, &forget->forget_one, cs 1148 fs/fuse/dev.c fuse_copy_finish(cs); cs 1157 fs/fuse/dev.c struct fuse_copy_state *cs, cs 1162 fs/fuse/dev.c return fuse_read_single_forget(fiq, cs, nbytes); cs 1164 fs/fuse/dev.c return fuse_read_batch_forget(fiq, cs, nbytes); cs 1177 fs/fuse/dev.c struct fuse_copy_state *cs, size_t nbytes) cs 1229 fs/fuse/dev.c return fuse_read_interrupt(fiq, cs, nbytes, req); cs 1234 fs/fuse/dev.c return fuse_read_forget(fc, fiq, cs, nbytes); cs 1260 fs/fuse/dev.c cs->req = req; cs 1261 fs/fuse/dev.c err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h)); cs 1263 fs/fuse/dev.c err = fuse_copy_args(cs, args->in_numargs, args->in_pages, cs 1265 fs/fuse/dev.c fuse_copy_finish(cs); cs 1318 fs/fuse/dev.c struct fuse_copy_state cs; cs 1328 fs/fuse/dev.c 
fuse_copy_init(&cs, 1, to); cs 1330 fs/fuse/dev.c return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to)); cs 1340 fs/fuse/dev.c struct fuse_copy_state cs; cs 1351 fs/fuse/dev.c fuse_copy_init(&cs, 1, NULL); cs 1352 fs/fuse/dev.c cs.pipebufs = bufs; cs 1353 fs/fuse/dev.c cs.pipe = pipe; cs 1354 fs/fuse/dev.c ret = fuse_dev_do_read(fud, in, &cs, len); cs 1358 fs/fuse/dev.c if (pipe->nrbufs + cs.nr_segs > pipe->buffers) { cs 1363 fs/fuse/dev.c for (ret = total = 0; page_nr < cs.nr_segs; total += ret) { cs 1377 fs/fuse/dev.c for (; page_nr < cs.nr_segs; page_nr++) cs 1385 fs/fuse/dev.c struct fuse_copy_state *cs) cs 1393 fs/fuse/dev.c err = fuse_copy_one(cs, &outarg, sizeof(outarg)); cs 1397 fs/fuse/dev.c fuse_copy_finish(cs); cs 1401 fs/fuse/dev.c fuse_copy_finish(cs); cs 1406 fs/fuse/dev.c struct fuse_copy_state *cs) cs 1414 fs/fuse/dev.c err = fuse_copy_one(cs, &outarg, sizeof(outarg)); cs 1417 fs/fuse/dev.c fuse_copy_finish(cs); cs 1429 fs/fuse/dev.c fuse_copy_finish(cs); cs 1434 fs/fuse/dev.c struct fuse_copy_state *cs) cs 1449 fs/fuse/dev.c err = fuse_copy_one(cs, &outarg, sizeof(outarg)); cs 1463 fs/fuse/dev.c err = fuse_copy_one(cs, buf, outarg.namelen + 1); cs 1466 fs/fuse/dev.c fuse_copy_finish(cs); cs 1479 fs/fuse/dev.c fuse_copy_finish(cs); cs 1484 fs/fuse/dev.c struct fuse_copy_state *cs) cs 1499 fs/fuse/dev.c err = fuse_copy_one(cs, &outarg, sizeof(outarg)); cs 1513 fs/fuse/dev.c err = fuse_copy_one(cs, buf, outarg.namelen + 1); cs 1516 fs/fuse/dev.c fuse_copy_finish(cs); cs 1530 fs/fuse/dev.c fuse_copy_finish(cs); cs 1535 fs/fuse/dev.c struct fuse_copy_state *cs) cs 1552 fs/fuse/dev.c err = fuse_copy_one(cs, &outarg, sizeof(outarg)); cs 1594 fs/fuse/dev.c err = fuse_copy_page(cs, &page, offset, this_num, 0); cs 1616 fs/fuse/dev.c fuse_copy_finish(cs); cs 1715 fs/fuse/dev.c struct fuse_copy_state *cs) cs 1725 fs/fuse/dev.c err = fuse_copy_one(cs, &outarg, sizeof(outarg)); cs 1729 fs/fuse/dev.c fuse_copy_finish(cs); cs 1747 fs/fuse/dev.c fuse_copy_finish(cs); cs 1752 fs/fuse/dev.c unsigned int size, struct fuse_copy_state *cs) cs 1755 fs/fuse/dev.c cs->move_pages = 0; cs 1759 fs/fuse/dev.c return fuse_notify_poll(fc, size, cs); cs 1762 fs/fuse/dev.c return fuse_notify_inval_inode(fc, size, cs); cs 1765 fs/fuse/dev.c return fuse_notify_inval_entry(fc, size, cs); cs 1768 fs/fuse/dev.c return fuse_notify_store(fc, size, cs); cs 1771 fs/fuse/dev.c return fuse_notify_retrieve(fc, size, cs); cs 1774 fs/fuse/dev.c return fuse_notify_delete(fc, size, cs); cs 1777 fs/fuse/dev.c fuse_copy_finish(cs); cs 1795 fs/fuse/dev.c static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args, cs 1812 fs/fuse/dev.c return fuse_copy_args(cs, args->out_numargs, args->out_pages, cs 1824 fs/fuse/dev.c struct fuse_copy_state *cs, size_t nbytes) cs 1836 fs/fuse/dev.c err = fuse_copy_one(cs, &oh, sizeof(oh)); cs 1849 fs/fuse/dev.c err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); cs 1891 fs/fuse/dev.c cs->req = req; cs 1893 fs/fuse/dev.c cs->move_pages = 0; cs 1898 fs/fuse/dev.c err = copy_out_args(cs, req->args, nbytes); cs 1899 fs/fuse/dev.c fuse_copy_finish(cs); cs 1916 fs/fuse/dev.c fuse_copy_finish(cs); cs 1922 fs/fuse/dev.c struct fuse_copy_state cs; cs 1931 fs/fuse/dev.c fuse_copy_init(&cs, 0, from); cs 1933 fs/fuse/dev.c return fuse_dev_do_write(fud, &cs, iov_iter_count(from)); cs 1943 fs/fuse/dev.c struct fuse_copy_state cs; cs 2000 fs/fuse/dev.c fuse_copy_init(&cs, 0, NULL); cs 2001 fs/fuse/dev.c cs.pipebufs = bufs; cs 2002 fs/fuse/dev.c cs.nr_segs = nbuf; cs 2003 
fs/fuse/dev.c cs.pipe = pipe; cs 2006 fs/fuse/dev.c cs.move_pages = 1; cs 2008 fs/fuse/dev.c ret = fuse_dev_do_write(fud, &cs, len); cs 3704 fs/nfsd/nfs4state.c struct nfsd4_compound_state *cs = &resp->cstate; cs 3706 fs/nfsd/nfs4state.c if (nfsd4_has_session(cs)) { cs 3707 fs/nfsd/nfs4state.c if (cs->status != nfserr_replay_cache) { cs 3709 fs/nfsd/nfs4state.c cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; cs 3712 fs/nfsd/nfs4state.c nfsd4_put_session(cs->session); cs 3713 fs/nfsd/nfs4state.c } else if (cs->clp) cs 3714 fs/nfsd/nfs4state.c put_client_renew(cs->clp); cs 72 fs/nfsd/xdr4.h static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) cs 74 fs/nfsd/xdr4.h return cs->slot != NULL; cs 517 fs/ocfs2/cluster/heartbeat.c unsigned int cs = *current_slot; cs 533 fs/ocfs2/cluster/heartbeat.c bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); cs 539 fs/ocfs2/cluster/heartbeat.c vec_start = (cs << bits) % PAGE_SIZE; cs 540 fs/ocfs2/cluster/heartbeat.c while(cs < max_slots) { cs 541 fs/ocfs2/cluster/heartbeat.c current_page = cs / spp; cs 545 fs/ocfs2/cluster/heartbeat.c (max_slots-cs) * (PAGE_SIZE/spp) ); cs 553 fs/ocfs2/cluster/heartbeat.c cs += vec_len / (PAGE_SIZE/spp); cs 558 fs/ocfs2/cluster/heartbeat.c *current_slot = cs; cs 359 fs/ubifs/log.c struct ubifs_cs_node *cs; cs 369 fs/ubifs/log.c buf = cs = kmalloc(max_len, GFP_NOFS); cs 373 fs/ubifs/log.c cs->ch.node_type = UBIFS_CS_NODE; cs 374 fs/ubifs/log.c cs->cmt_no = cpu_to_le64(c->cmt_no); cs 375 fs/ubifs/log.c ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0); cs 381 fs/ubifs/log.c err = ubifs_shash_update(c, c->log_hash, cs, UBIFS_CS_NODE_SZ); cs 434 fs/ubifs/log.c err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len); cs 79 fs/ubifs/sb.c struct ubifs_cs_node *cs; cs 166 fs/ubifs/sb.c cs = kzalloc(ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size), GFP_KERNEL); cs 168 fs/ubifs/sb.c if (!sup || !mst || !idx || !ino || !cs) { cs 319 fs/ubifs/sb.c cs->ch.node_type = UBIFS_CS_NODE; cs 351 fs/ubifs/sb.c err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0); cs 363 fs/ubifs/sb.c kfree(cs); cs 38 include/linux/ceph/string_table.h static inline int ceph_compare_string(struct ceph_string *cs, cs 41 include/linux/ceph/string_table.h size_t cs_len = cs ? 
cs->len : 0; cs 46 include/linux/ceph/string_table.h return strncmp(cs->str, str, len); cs 81 include/linux/clocksource.h u64 (*read)(struct clocksource *cs); cs 94 include/linux/clocksource.h int (*enable)(struct clocksource *cs); cs 95 include/linux/clocksource.h void (*disable)(struct clocksource *cs); cs 97 include/linux/clocksource.h void (*suspend)(struct clocksource *cs); cs 98 include/linux/clocksource.h void (*resume)(struct clocksource *cs); cs 99 include/linux/clocksource.h void (*mark_unstable)(struct clocksource *cs); cs 100 include/linux/clocksource.h void (*tick_stable)(struct clocksource *cs); cs 192 include/linux/clocksource.h extern void clocksource_change_rating(struct clocksource *cs, int rating); cs 196 include/linux/clocksource.h extern void clocksource_mark_unstable(struct clocksource *cs); cs 198 include/linux/clocksource.h clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles); cs 199 include/linux/clocksource.h extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now); cs 211 include/linux/clocksource.h __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq); cs 213 include/linux/clocksource.h __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq); cs 219 include/linux/clocksource.h static inline int __clocksource_register(struct clocksource *cs) cs 221 include/linux/clocksource.h return __clocksource_register_scale(cs, 1, 0); cs 224 include/linux/clocksource.h static inline int clocksource_register_hz(struct clocksource *cs, u32 hz) cs 226 include/linux/clocksource.h return __clocksource_register_scale(cs, 1, hz); cs 229 include/linux/clocksource.h static inline int clocksource_register_khz(struct clocksource *cs, u32 khz) cs 231 include/linux/clocksource.h return __clocksource_register_scale(cs, 1000, khz); cs 234 include/linux/clocksource.h static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz) cs 236 include/linux/clocksource.h __clocksource_update_freq_scale(cs, 1, hz); cs 239 include/linux/clocksource.h static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz) cs 241 include/linux/clocksource.h __clocksource_update_freq_scale(cs, 1000, khz); cs 245 include/linux/clocksource.h extern void clocksource_arch_init(struct clocksource *cs); cs 247 include/linux/clocksource.h static inline void clocksource_arch_init(struct clocksource *cs) { } cs 34 include/linux/dw_apb_timer.h struct clocksource cs; cs 83 include/linux/energy_model.h struct em_cap_state *cs; cs 93 include/linux/energy_model.h cs = &pd->table[pd->nr_cap_states - 1]; cs 94 include/linux/energy_model.h freq = map_util_freq(max_util, cs->frequency, scale_cpu); cs 101 include/linux/energy_model.h cs = &pd->table[i]; cs 102 include/linux/energy_model.h if (cs->frequency >= freq) cs 148 include/linux/energy_model.h return cs->cost * sum_util / scale_cpu; cs 36 include/linux/mbus.h } cs[4]; cs 99 include/linux/mfd/syscon/atmel-matrix.h #define AT91_MATRIX_CSA(cs, val) (val << (cs)) cs 18 include/linux/mfd/syscon/atmel-smc.h #define ATMEL_SMC_SETUP(cs) (((cs) * 0x10)) cs 19 include/linux/mfd/syscon/atmel-smc.h #define ATMEL_HSMC_SETUP(layout, cs) \ cs 20 include/linux/mfd/syscon/atmel-smc.h ((layout)->timing_regs_offset + ((cs) * 0x14)) cs 21 include/linux/mfd/syscon/atmel-smc.h #define ATMEL_SMC_PULSE(cs) (((cs) * 0x10) + 0x4) cs 22 include/linux/mfd/syscon/atmel-smc.h #define ATMEL_HSMC_PULSE(layout, cs) \ cs 23 include/linux/mfd/syscon/atmel-smc.h ((layout)->timing_regs_offset + 
((cs) * 0x14) + 0x4) cs 24 include/linux/mfd/syscon/atmel-smc.h #define ATMEL_SMC_CYCLE(cs) (((cs) * 0x10) + 0x8) cs 25 include/linux/mfd/syscon/atmel-smc.h #define ATMEL_HSMC_CYCLE(layout, cs) \ cs 26 include/linux/mfd/syscon/atmel-smc.h ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x8) cs 32 include/linux/mfd/syscon/atmel-smc.h #define ATMEL_SMC_MODE(cs) (((cs) * 0x10) + 0xc) cs 33 include/linux/mfd/syscon/atmel-smc.h #define ATMEL_HSMC_MODE(layout, cs) \ cs 34 include/linux/mfd/syscon/atmel-smc.h ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x10) cs 64 include/linux/mfd/syscon/atmel-smc.h #define ATMEL_HSMC_TIMINGS(layout, cs) \ cs 65 include/linux/mfd/syscon/atmel-smc.h ((layout)->timing_regs_offset + ((cs) * 0x14) + 0xc) cs 106 include/linux/mfd/syscon/atmel-smc.h void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs, cs 110 include/linux/mfd/syscon/atmel-smc.h int cs, const struct atmel_smc_cs_conf *conf); cs 111 include/linux/mfd/syscon/atmel-smc.h void atmel_smc_cs_conf_get(struct regmap *regmap, int cs, cs 115 include/linux/mfd/syscon/atmel-smc.h int cs, struct atmel_smc_cs_conf *conf); cs 20 include/linux/mtd/latch-addr-flash.h int (*init)(void *data, int cs); cs 57 include/linux/mtd/platnand.h void (*select_chip)(struct nand_chip *chip, int cs); cs 861 include/linux/mtd/rawnand.h unsigned int cs; cs 868 include/linux/mtd/rawnand.h .cs = _cs, \ cs 993 include/linux/mtd/rawnand.h void (*select_chip)(struct nand_chip *chip, int cs); cs 1384 include/linux/mtd/rawnand.h void nand_select_target(struct nand_chip *chip, unsigned int cs); cs 1251 include/linux/nvme.h __le64 cs; cs 32 include/linux/omap-gpmc.h int cs); cs 43 include/linux/omap-gpmc.h int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, cs 49 include/linux/omap-gpmc.h int cs) cs 55 include/linux/omap-gpmc.h int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, cs 73 include/linux/omap-gpmc.h extern void gpmc_cs_write_reg(int cs, int idx, u32 val); cs 75 include/linux/omap-gpmc.h extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t, cs 77 include/linux/omap-gpmc.h extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p); cs 78 include/linux/omap-gpmc.h extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base); cs 79 include/linux/omap-gpmc.h extern void gpmc_cs_free(int cs); cs 150 include/linux/pe.h uint16_t cs; /* initial %cs relative to load segment */ cs 166 include/linux/platform_data/gpmc-omap.h struct gpmc_omap_cs_data cs[GPMC_CS_NUM]; cs 22 include/linux/platform_data/ti-aemif.h u32 cs; cs 20 include/linux/spi/s3c24xx.h void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); cs 606 include/linux/spi/spi.h int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs); cs 264 include/linux/timekeeping.h struct clocksource *cs; cs 25 include/soc/at91/atmel-sfr.h #define AT91_SFR_CCFG_EBI_CSA(cs, val) ((val) << (cs)) cs 458 include/soc/fsl/qman.h static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs) cs 464 include/soc/fsl/qman.h (cs & QM_FQD_XS_MASK); cs 566 include/soc/fsl/qman.h u8 cs; /* boolean, only used in query response */ cs 7 include/sound/pcm_iec958.h int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs, cs 11 include/sound/pcm_iec958.h u8 *cs, size_t len); cs 33 include/trace/events/mce.h __field( u8, cs ) cs 53 include/trace/events/mce.h __entry->cs = m->cs; cs 64 include/trace/events/mce.h __entry->cs, __entry->ip, cs 587 include/trace/events/rxrpc.h 
__field(enum rxrpc_conn_cache_state, cs ) cs 596 include/trace/events/rxrpc.h __entry->cs = conn->cache_state; cs 603 include/trace/events/rxrpc.h __print_symbolic(__entry->cs, rxrpc_conn_cache_states), cs 56 include/uapi/linux/hdlcdrv.h struct hdlcdrv_channel_state cs; cs 228 include/uapi/linux/wimax/i2400m.h __u8 cs; cs 18 include/uapi/video/uvesafb.h __u16 cs; cs 400 include/video/imx-ipu-v3.h enum ipu_color_space cs; cs 407 include/video/imx-ipu-v3.h enum ipu_color_space cs) cs 411 include/video/imx-ipu-v3.h ic_cs->cs = cs; cs 348 include/xen/interface/xen-mca.h __u8 cs; /* code segment */ cs 203 kernel/cgroup/cpuset.c static inline struct cpuset *parent_cs(struct cpuset *cs) cs 205 kernel/cgroup/cpuset.c return css_cs(cs->css.parent); cs 221 kernel/cgroup/cpuset.c static inline bool is_cpuset_online(struct cpuset *cs) cs 223 kernel/cgroup/cpuset.c return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); cs 226 kernel/cgroup/cpuset.c static inline int is_cpu_exclusive(const struct cpuset *cs) cs 228 kernel/cgroup/cpuset.c return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); cs 231 kernel/cgroup/cpuset.c static inline int is_mem_exclusive(const struct cpuset *cs) cs 233 kernel/cgroup/cpuset.c return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); cs 236 kernel/cgroup/cpuset.c static inline int is_mem_hardwall(const struct cpuset *cs) cs 238 kernel/cgroup/cpuset.c return test_bit(CS_MEM_HARDWALL, &cs->flags); cs 241 kernel/cgroup/cpuset.c static inline int is_sched_load_balance(const struct cpuset *cs) cs 243 kernel/cgroup/cpuset.c return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); cs 246 kernel/cgroup/cpuset.c static inline int is_memory_migrate(const struct cpuset *cs) cs 248 kernel/cgroup/cpuset.c return test_bit(CS_MEMORY_MIGRATE, &cs->flags); cs 251 kernel/cgroup/cpuset.c static inline int is_spread_page(const struct cpuset *cs) cs 253 kernel/cgroup/cpuset.c return test_bit(CS_SPREAD_PAGE, &cs->flags); cs 256 kernel/cgroup/cpuset.c static inline int is_spread_slab(const struct cpuset *cs) cs 258 kernel/cgroup/cpuset.c return test_bit(CS_SPREAD_SLAB, &cs->flags); cs 261 kernel/cgroup/cpuset.c static inline int is_partition_root(const struct cpuset *cs) cs 263 kernel/cgroup/cpuset.c return cs->partition_root_state > 0; cs 380 kernel/cgroup/cpuset.c static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) cs 382 kernel/cgroup/cpuset.c while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { cs 383 kernel/cgroup/cpuset.c cs = parent_cs(cs); cs 384 kernel/cgroup/cpuset.c if (unlikely(!cs)) { cs 396 kernel/cgroup/cpuset.c cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); cs 410 kernel/cgroup/cpuset.c static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) cs 412 kernel/cgroup/cpuset.c while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) cs 413 kernel/cgroup/cpuset.c cs = parent_cs(cs); cs 414 kernel/cgroup/cpuset.c nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); cs 422 kernel/cgroup/cpuset.c static void cpuset_update_task_spread_flag(struct cpuset *cs, cs 425 kernel/cgroup/cpuset.c if (is_spread_page(cs)) cs 430 kernel/cgroup/cpuset.c if (is_spread_slab(cs)) cs 460 kernel/cgroup/cpuset.c static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) cs 464 kernel/cgroup/cpuset.c if (cs) { cs 465 kernel/cgroup/cpuset.c pmask1 = &cs->cpus_allowed; cs 466 kernel/cgroup/cpuset.c pmask2 = &cs->effective_cpus; cs 467 kernel/cgroup/cpuset.c pmask3 = &cs->subparts_cpus; cs 497 kernel/cgroup/cpuset.c static 
inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) cs 499 kernel/cgroup/cpuset.c if (cs) { cs 500 kernel/cgroup/cpuset.c free_cpumask_var(cs->cpus_allowed); cs 501 kernel/cgroup/cpuset.c free_cpumask_var(cs->effective_cpus); cs 502 kernel/cgroup/cpuset.c free_cpumask_var(cs->subparts_cpus); cs 515 kernel/cgroup/cpuset.c static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) cs 519 kernel/cgroup/cpuset.c trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); cs 528 kernel/cgroup/cpuset.c cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); cs 529 kernel/cgroup/cpuset.c cpumask_copy(trial->effective_cpus, cs->effective_cpus); cs 537 kernel/cgroup/cpuset.c static inline void free_cpuset(struct cpuset *cs) cs 539 kernel/cgroup/cpuset.c free_cpumasks(cs, NULL); cs 540 kernel/cgroup/cpuset.c kfree(cs); cs 910 kernel/cgroup/cpuset.c static void update_tasks_root_domain(struct cpuset *cs) cs 915 kernel/cgroup/cpuset.c css_task_iter_start(&cs->css, 0, &it); cs 925 kernel/cgroup/cpuset.c struct cpuset *cs = NULL; cs 942 kernel/cgroup/cpuset.c cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { cs 944 kernel/cgroup/cpuset.c if (cpumask_empty(cs->effective_cpus)) { cs 949 kernel/cgroup/cpuset.c css_get(&cs->css); cs 953 kernel/cgroup/cpuset.c update_tasks_root_domain(cs); cs 956 kernel/cgroup/cpuset.c css_put(&cs->css); cs 1033 kernel/cgroup/cpuset.c static void update_tasks_cpumask(struct cpuset *cs) cs 1038 kernel/cgroup/cpuset.c css_task_iter_start(&cs->css, 0, &it); cs 1040 kernel/cgroup/cpuset.c set_cpus_allowed_ptr(task, cs->effective_cpus); cs 1056 kernel/cgroup/cpuset.c struct cpuset *cs, struct cpuset *parent) cs 1061 kernel/cgroup/cpuset.c cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); cs 1064 kernel/cgroup/cpuset.c cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); cs 1294 kernel/cgroup/cpuset.c static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) cs 1301 kernel/cgroup/cpuset.c cpuset_for_each_descendant_pre(cp, pos_css, cs) { cs 1338 kernel/cgroup/cpuset.c if ((cp != cs) && cp->partition_root_state) { cs 1447 kernel/cgroup/cpuset.c static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, cs 1460 kernel/cgroup/cpuset.c if (sibling == cs) cs 1476 kernel/cgroup/cpuset.c static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, cs 1483 kernel/cgroup/cpuset.c if (cs == &top_cpuset) cs 1505 kernel/cgroup/cpuset.c if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) cs 1508 kernel/cgroup/cpuset.c retval = validate_change(cs, trialcs); cs 1522 kernel/cgroup/cpuset.c if (cs->partition_root_state) { cs 1526 kernel/cgroup/cpuset.c if (update_parent_subparts_cpumask(cs, partcmd_update, cs 1532 kernel/cgroup/cpuset.c cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); cs 1537 kernel/cgroup/cpuset.c if (cs->nr_subparts_cpus) { cs 1538 kernel/cgroup/cpuset.c cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, cs 1539 kernel/cgroup/cpuset.c cs->cpus_allowed); cs 1540 kernel/cgroup/cpuset.c cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); cs 1544 kernel/cgroup/cpuset.c update_cpumasks_hier(cs, &tmp); cs 1546 kernel/cgroup/cpuset.c if (cs->partition_root_state) { cs 1547 kernel/cgroup/cpuset.c struct cpuset *parent = parent_cs(cs); cs 1554 kernel/cgroup/cpuset.c update_sibling_cpumasks(parent, cs, &tmp); cs 1645 kernel/cgroup/cpuset.c static void update_tasks_nodemask(struct cpuset *cs) cs 1651 kernel/cgroup/cpuset.c cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ cs 1653 
kernel/cgroup/cpuset.c guarantee_online_mems(cs, &newmems); cs 1665 kernel/cgroup/cpuset.c css_task_iter_start(&cs->css, 0, &it); cs 1676 kernel/cgroup/cpuset.c migrate = is_memory_migrate(cs); cs 1678 kernel/cgroup/cpuset.c mpol_rebind_mm(mm, &cs->mems_allowed); cs 1680 kernel/cgroup/cpuset.c cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); cs 1690 kernel/cgroup/cpuset.c cs->old_mems_allowed = newmems; cs 1708 kernel/cgroup/cpuset.c static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) cs 1714 kernel/cgroup/cpuset.c cpuset_for_each_descendant_pre(cp, pos_css, cs) { cs 1764 kernel/cgroup/cpuset.c static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, cs 1773 kernel/cgroup/cpuset.c if (cs == &top_cpuset) { cs 1798 kernel/cgroup/cpuset.c if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { cs 1802 kernel/cgroup/cpuset.c retval = validate_change(cs, trialcs); cs 1807 kernel/cgroup/cpuset.c cs->mems_allowed = trialcs->mems_allowed; cs 1811 kernel/cgroup/cpuset.c update_nodemasks_hier(cs, &trialcs->mems_allowed); cs 1827 kernel/cgroup/cpuset.c static int update_relax_domain_level(struct cpuset *cs, s64 val) cs 1834 kernel/cgroup/cpuset.c if (val != cs->relax_domain_level) { cs 1835 kernel/cgroup/cpuset.c cs->relax_domain_level = val; cs 1836 kernel/cgroup/cpuset.c if (!cpumask_empty(cs->cpus_allowed) && cs 1837 kernel/cgroup/cpuset.c is_sched_load_balance(cs)) cs 1852 kernel/cgroup/cpuset.c static void update_tasks_flags(struct cpuset *cs) cs 1857 kernel/cgroup/cpuset.c css_task_iter_start(&cs->css, 0, &it); cs 1859 kernel/cgroup/cpuset.c cpuset_update_task_spread_flag(cs, task); cs 1872 kernel/cgroup/cpuset.c static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, cs 1880 kernel/cgroup/cpuset.c trialcs = alloc_trial_cpuset(cs); cs 1889 kernel/cgroup/cpuset.c err = validate_change(cs, trialcs); cs 1893 kernel/cgroup/cpuset.c balance_flag_changed = (is_sched_load_balance(cs) != cs 1896 kernel/cgroup/cpuset.c spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) cs 1897 kernel/cgroup/cpuset.c || (is_spread_page(cs) != is_spread_page(trialcs))); cs 1900 kernel/cgroup/cpuset.c cs->flags = trialcs->flags; cs 1907 kernel/cgroup/cpuset.c update_tasks_flags(cs); cs 1920 kernel/cgroup/cpuset.c static int update_prstate(struct cpuset *cs, int val) cs 1923 kernel/cgroup/cpuset.c struct cpuset *parent = parent_cs(cs); cs 1928 kernel/cgroup/cpuset.c if (val == cs->partition_root_state) cs 1935 kernel/cgroup/cpuset.c if (val && cs->partition_root_state) cs 1942 kernel/cgroup/cpuset.c if (!cs->partition_root_state) { cs 1948 kernel/cgroup/cpuset.c if (cpumask_empty(cs->cpus_allowed)) cs 1951 kernel/cgroup/cpuset.c err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); cs 1955 kernel/cgroup/cpuset.c err = update_parent_subparts_cpumask(cs, partcmd_enable, cs 1958 kernel/cgroup/cpuset.c update_flag(CS_CPU_EXCLUSIVE, cs, 0); cs 1961 kernel/cgroup/cpuset.c cs->partition_root_state = PRS_ENABLED; cs 1967 kernel/cgroup/cpuset.c if (cs->partition_root_state == PRS_ERROR) { cs 1968 kernel/cgroup/cpuset.c cs->partition_root_state = 0; cs 1969 kernel/cgroup/cpuset.c update_flag(CS_CPU_EXCLUSIVE, cs, 0); cs 1974 kernel/cgroup/cpuset.c err = update_parent_subparts_cpumask(cs, partcmd_disable, cs 1979 kernel/cgroup/cpuset.c cs->partition_root_state = 0; cs 1982 kernel/cgroup/cpuset.c update_flag(CS_CPU_EXCLUSIVE, cs, 0); cs 1993 kernel/cgroup/cpuset.c update_sibling_cpumasks(parent, cs, &tmp); cs 2108 kernel/cgroup/cpuset.c struct cpuset *cs; cs 2114 
kernel/cgroup/cpuset.c cs = css_cs(css); cs 2121 kernel/cgroup/cpuset.c (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) cs 2125 kernel/cgroup/cpuset.c ret = task_can_attach(task, cs->cpus_allowed); cs 2137 kernel/cgroup/cpuset.c cs->attach_in_progress++; cs 2169 kernel/cgroup/cpuset.c struct cpuset *cs; cs 2173 kernel/cgroup/cpuset.c cs = css_cs(css); cs 2178 kernel/cgroup/cpuset.c if (cs == &top_cpuset) cs 2181 kernel/cgroup/cpuset.c guarantee_online_cpus(cs, cpus_attach); cs 2183 kernel/cgroup/cpuset.c guarantee_online_mems(cs, &cpuset_attach_nodemask_to); cs 2193 kernel/cgroup/cpuset.c cpuset_update_task_spread_flag(cs, task); cs 2200 kernel/cgroup/cpuset.c cpuset_attach_nodemask_to = cs->effective_mems; cs 2215 kernel/cgroup/cpuset.c if (is_memory_migrate(cs)) cs 2223 kernel/cgroup/cpuset.c cs->old_mems_allowed = cpuset_attach_nodemask_to; cs 2225 kernel/cgroup/cpuset.c cs->attach_in_progress--; cs 2226 kernel/cgroup/cpuset.c if (!cs->attach_in_progress) cs 2256 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(css); cs 2262 kernel/cgroup/cpuset.c if (!is_cpuset_online(cs)) { cs 2269 kernel/cgroup/cpuset.c retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); cs 2272 kernel/cgroup/cpuset.c retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); cs 2275 kernel/cgroup/cpuset.c retval = update_flag(CS_MEM_HARDWALL, cs, val); cs 2278 kernel/cgroup/cpuset.c retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); cs 2281 kernel/cgroup/cpuset.c retval = update_flag(CS_MEMORY_MIGRATE, cs, val); cs 2287 kernel/cgroup/cpuset.c retval = update_flag(CS_SPREAD_PAGE, cs, val); cs 2290 kernel/cgroup/cpuset.c retval = update_flag(CS_SPREAD_SLAB, cs, val); cs 2305 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(css); cs 2311 kernel/cgroup/cpuset.c if (!is_cpuset_online(cs)) cs 2316 kernel/cgroup/cpuset.c retval = update_relax_domain_level(cs, val); cs 2334 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(of_css(of)); cs 2359 kernel/cgroup/cpuset.c css_get(&cs->css); cs 2365 kernel/cgroup/cpuset.c if (!is_cpuset_online(cs)) cs 2368 kernel/cgroup/cpuset.c trialcs = alloc_trial_cpuset(cs); cs 2376 kernel/cgroup/cpuset.c retval = update_cpumask(cs, trialcs, buf); cs 2379 kernel/cgroup/cpuset.c retval = update_nodemask(cs, trialcs, buf); cs 2391 kernel/cgroup/cpuset.c css_put(&cs->css); cs 2406 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(seq_css(sf)); cs 2414 kernel/cgroup/cpuset.c seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); cs 2417 kernel/cgroup/cpuset.c seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); cs 2420 kernel/cgroup/cpuset.c seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); cs 2423 kernel/cgroup/cpuset.c seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); cs 2426 kernel/cgroup/cpuset.c seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); cs 2438 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(css); cs 2442 kernel/cgroup/cpuset.c return is_cpu_exclusive(cs); cs 2444 kernel/cgroup/cpuset.c return is_mem_exclusive(cs); cs 2446 kernel/cgroup/cpuset.c return is_mem_hardwall(cs); cs 2448 kernel/cgroup/cpuset.c return is_sched_load_balance(cs); cs 2450 kernel/cgroup/cpuset.c return is_memory_migrate(cs); cs 2454 kernel/cgroup/cpuset.c return fmeter_getrate(&cs->fmeter); cs 2456 kernel/cgroup/cpuset.c return is_spread_page(cs); cs 2458 kernel/cgroup/cpuset.c return is_spread_slab(cs); cs 2469 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(css); cs 2473 kernel/cgroup/cpuset.c return cs->relax_domain_level; cs 2484 
kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(seq_css(seq));
cs 2486 kernel/cgroup/cpuset.c switch (cs->partition_root_state) {
cs 2503 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(of_css(of));
cs 2519 kernel/cgroup/cpuset.c css_get(&cs->css);
cs 2522 kernel/cgroup/cpuset.c if (!is_cpuset_online(cs))
cs 2525 kernel/cgroup/cpuset.c retval = update_prstate(cs, val);
cs 2529 kernel/cgroup/cpuset.c css_put(&cs->css);
cs 2701 kernel/cgroup/cpuset.c struct cpuset *cs;
cs 2706 kernel/cgroup/cpuset.c cs = kzalloc(sizeof(*cs), GFP_KERNEL);
cs 2707 kernel/cgroup/cpuset.c if (!cs)
cs 2710 kernel/cgroup/cpuset.c if (alloc_cpumasks(cs, NULL)) {
cs 2711 kernel/cgroup/cpuset.c kfree(cs);
cs 2715 kernel/cgroup/cpuset.c set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cs 2716 kernel/cgroup/cpuset.c nodes_clear(cs->mems_allowed);
cs 2717 kernel/cgroup/cpuset.c nodes_clear(cs->effective_mems);
cs 2718 kernel/cgroup/cpuset.c fmeter_init(&cs->fmeter);
cs 2719 kernel/cgroup/cpuset.c cs->relax_domain_level = -1;
cs 2721 kernel/cgroup/cpuset.c return &cs->css;
cs 2726 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(css);
cs 2727 kernel/cgroup/cpuset.c struct cpuset *parent = parent_cs(cs);
cs 2737 kernel/cgroup/cpuset.c set_bit(CS_ONLINE, &cs->flags);
cs 2739 kernel/cgroup/cpuset.c set_bit(CS_SPREAD_PAGE, &cs->flags);
cs 2741 kernel/cgroup/cpuset.c set_bit(CS_SPREAD_SLAB, &cs->flags);
cs 2747 kernel/cgroup/cpuset.c cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs 2748 kernel/cgroup/cpuset.c cs->effective_mems = parent->effective_mems;
cs 2749 kernel/cgroup/cpuset.c cs->use_parent_ecpus = true;
cs 2780 kernel/cgroup/cpuset.c cs->mems_allowed = parent->mems_allowed;
cs 2781 kernel/cgroup/cpuset.c cs->effective_mems = parent->mems_allowed;
cs 2782 kernel/cgroup/cpuset.c cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
cs 2783 kernel/cgroup/cpuset.c cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
cs 2804 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(css);
cs 2809 kernel/cgroup/cpuset.c if (is_partition_root(cs))
cs 2810 kernel/cgroup/cpuset.c update_prstate(cs, 0);
cs 2813 kernel/cgroup/cpuset.c is_sched_load_balance(cs))
cs 2814 kernel/cgroup/cpuset.c update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
cs 2816 kernel/cgroup/cpuset.c if (cs->use_parent_ecpus) {
cs 2817 kernel/cgroup/cpuset.c struct cpuset *parent = parent_cs(cs);
cs 2819 kernel/cgroup/cpuset.c cs->use_parent_ecpus = false;
cs 2824 kernel/cgroup/cpuset.c clear_bit(CS_ONLINE, &cs->flags);
cs 2832 kernel/cgroup/cpuset.c struct cpuset *cs = css_cs(css);
cs 2834 kernel/cgroup/cpuset.c free_cpuset(cs);
cs 2921 kernel/cgroup/cpuset.c static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
cs 2929 kernel/cgroup/cpuset.c parent = parent_cs(cs);
cs 2934 kernel/cgroup/cpuset.c if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
cs 2936 kernel/cgroup/cpuset.c pr_cont_cgroup_name(cs->css.cgroup);
cs 2942 kernel/cgroup/cpuset.c hotplug_update_tasks_legacy(struct cpuset *cs,
cs 2949 kernel/cgroup/cpuset.c cpumask_copy(cs->cpus_allowed, new_cpus);
cs 2950 kernel/cgroup/cpuset.c cpumask_copy(cs->effective_cpus, new_cpus);
cs 2951 kernel/cgroup/cpuset.c cs->mems_allowed = *new_mems;
cs 2952 kernel/cgroup/cpuset.c cs->effective_mems = *new_mems;
cs 2959 kernel/cgroup/cpuset.c if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
cs 2960 kernel/cgroup/cpuset.c update_tasks_cpumask(cs);
cs 2961 kernel/cgroup/cpuset.c if (mems_updated && !nodes_empty(cs->mems_allowed))
cs 2962 kernel/cgroup/cpuset.c update_tasks_nodemask(cs);
cs 2964 kernel/cgroup/cpuset.c is_empty = cpumask_empty(cs->cpus_allowed) ||
cs 2965 kernel/cgroup/cpuset.c nodes_empty(cs->mems_allowed);
cs 2975 kernel/cgroup/cpuset.c remove_tasks_in_empty_cpuset(cs);
cs 2981 kernel/cgroup/cpuset.c hotplug_update_tasks(struct cpuset *cs,
cs 2986 kernel/cgroup/cpuset.c cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
cs 2988 kernel/cgroup/cpuset.c *new_mems = parent_cs(cs)->effective_mems;
cs 2991 kernel/cgroup/cpuset.c cpumask_copy(cs->effective_cpus, new_cpus);
cs 2992 kernel/cgroup/cpuset.c cs->effective_mems = *new_mems;
cs 2996 kernel/cgroup/cpuset.c update_tasks_cpumask(cs);
cs 2998 kernel/cgroup/cpuset.c update_tasks_nodemask(cs);
cs 3017 kernel/cgroup/cpuset.c static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
cs 3025 kernel/cgroup/cpuset.c wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
cs 3033 kernel/cgroup/cpuset.c if (cs->attach_in_progress) {
cs 3038 kernel/cgroup/cpuset.c parent = parent_cs(cs);
cs 3039 kernel/cgroup/cpuset.c compute_effective_cpumask(&new_cpus, cs, parent);
cs 3040 kernel/cgroup/cpuset.c nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
cs 3042 kernel/cgroup/cpuset.c if (cs->nr_subparts_cpus)
cs 3047 kernel/cgroup/cpuset.c cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
cs 3049 kernel/cgroup/cpuset.c if (!tmp || !cs->partition_root_state)
cs 3057 kernel/cgroup/cpuset.c if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
cs 3059 kernel/cgroup/cpuset.c if (cs->nr_subparts_cpus) {
cs 3060 kernel/cgroup/cpuset.c cs->nr_subparts_cpus = 0;
cs 3061 kernel/cgroup/cpuset.c cpumask_clear(cs->subparts_cpus);
cs 3062 kernel/cgroup/cpuset.c compute_effective_cpumask(&new_cpus, cs, parent);
cs 3073 kernel/cgroup/cpuset.c update_parent_subparts_cpumask(cs, partcmd_disable,
cs 3075 kernel/cgroup/cpuset.c cs->partition_root_state = PRS_ERROR;
cs 3086 kernel/cgroup/cpuset.c ((cs->partition_root_state == PRS_ERROR) ||
cs 3088 kernel/cgroup/cpuset.c update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp))
cs 3092 kernel/cgroup/cpuset.c cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
cs 3093 kernel/cgroup/cpuset.c mems_updated = !nodes_equal(new_mems, cs->effective_mems);
cs 3096 kernel/cgroup/cpuset.c hotplug_update_tasks(cs, &new_cpus, &new_mems,
cs 3099 kernel/cgroup/cpuset.c hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
cs 3186 kernel/cgroup/cpuset.c struct cpuset *cs;
cs 3190 kernel/cgroup/cpuset.c cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
cs 3191 kernel/cgroup/cpuset.c if (cs == &top_cpuset || !css_tryget_online(&cs->css))
cs 3195 kernel/cgroup/cpuset.c cpuset_hotplug_update_tasks(cs, ptmp);
cs 3198 kernel/cgroup/cpuset.c css_put(&cs->css);
cs 3370 kernel/cgroup/cpuset.c static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
cs 3372 kernel/cgroup/cpuset.c while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
cs 3373 kernel/cgroup/cpuset.c cs = parent_cs(cs);
cs 3374 kernel/cgroup/cpuset.c return cs;
cs 3419 kernel/cgroup/cpuset.c struct cpuset *cs; /* current cpuset ancestors */
cs 3443 kernel/cgroup/cpuset.c cs = nearest_hardwall_ancestor(task_cs(current));
cs 3444 kernel/cgroup/cpuset.c allowed = node_isset(node, cs->mems_allowed);
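The kernel/cgroup/cpuset.c hits above all funnel through css_cs(), which recovers the cpuset from its embedded cgroup_subsys_state. A minimal sketch of that embedding-plus-container_of idiom follows; my_state and css_my_state are hypothetical stand-ins for the real cpuset definitions, and the container_of macro is spelled out so the snippet compiles on its own:

	#include <stddef.h>

	/* Stand-in for the kernel type; only the embedding matters here. */
	struct cgroup_subsys_state { int refcnt; };

	struct my_state {                       /* plays the role of struct cpuset */
		struct cgroup_subsys_state css; /* embedded, never pointed to */
		unsigned long flags;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* The css_cs() pattern: map the generic css back to the enclosing object. */
	static struct my_state *css_my_state(struct cgroup_subsys_state *css)
	{
		return container_of(css, struct my_state, css);
	}

Because the css is embedded at a fixed offset, the conversion is pure pointer arithmetic; this is why the css_alloc entries above return &cs->css rather than a separately allocated handle.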
cs 30 kernel/power/energy_model.c static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
cs 35 kernel/power/energy_model.c snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
cs 39 kernel/power/energy_model.c debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
cs 40 kernel/power/energy_model.c debugfs_create_ulong("power", 0444, d, &cs->power);
cs 41 kernel/power/energy_model.c debugfs_create_ulong("cost", 0444, d, &cs->cost);
cs 119 kernel/time/clocksource.c static void __clocksource_change_rating(struct clocksource *cs, int rating);
cs 145 kernel/time/clocksource.c static void __clocksource_unstable(struct clocksource *cs)
cs 147 kernel/time/clocksource.c cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
cs 148 kernel/time/clocksource.c cs->flags |= CLOCK_SOURCE_UNSTABLE;
cs 154 kernel/time/clocksource.c if (list_empty(&cs->list)) {
cs 155 kernel/time/clocksource.c cs->rating = 0;
cs 159 kernel/time/clocksource.c if (cs->mark_unstable)
cs 160 kernel/time/clocksource.c cs->mark_unstable(cs);
cs 174 kernel/time/clocksource.c void clocksource_mark_unstable(struct clocksource *cs)
cs 179 kernel/time/clocksource.c if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
cs 180 kernel/time/clocksource.c if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
cs 181 kernel/time/clocksource.c list_add(&cs->wd_list, &watchdog_list);
cs 182 kernel/time/clocksource.c __clocksource_unstable(cs);
cs 189 kernel/time/clocksource.c struct clocksource *cs;
cs 200 kernel/time/clocksource.c list_for_each_entry(cs, &watchdog_list, wd_list) {
cs 203 kernel/time/clocksource.c if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
cs 210 kernel/time/clocksource.c csnow = cs->read(cs);
cs 215 kernel/time/clocksource.c if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
cs 217 kernel/time/clocksource.c cs->flags |= CLOCK_SOURCE_WATCHDOG;
cs 218 kernel/time/clocksource.c cs->wd_last = wdnow;
cs 219 kernel/time/clocksource.c cs->cs_last = csnow;
cs 223 kernel/time/clocksource.c delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
cs 227 kernel/time/clocksource.c delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
cs 228 kernel/time/clocksource.c cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
cs 229 kernel/time/clocksource.c wdlast = cs->wd_last; /* save these in case we print them */
cs 230 kernel/time/clocksource.c cslast = cs->cs_last;
cs 231 kernel/time/clocksource.c cs->cs_last = csnow;
cs 232 kernel/time/clocksource.c cs->wd_last = wdnow;
cs 240 kernel/time/clocksource.c smp_processor_id(), cs->name);
cs 244 kernel/time/clocksource.c cs->name, csnow, cslast, cs->mask);
cs 245 kernel/time/clocksource.c __clocksource_unstable(cs);
cs 249 kernel/time/clocksource.c if (cs == curr_clocksource && cs->tick_stable)
cs 250 kernel/time/clocksource.c cs->tick_stable(cs);
cs 252 kernel/time/clocksource.c if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
cs 253 kernel/time/clocksource.c (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
cs 256 kernel/time/clocksource.c cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
cs 273 kernel/time/clocksource.c if (cs != curr_clocksource) {
cs 274 kernel/time/clocksource.c cs->flags |= CLOCK_SOURCE_RESELECT;
cs 329 kernel/time/clocksource.c struct clocksource *cs;
cs 331 kernel/time/clocksource.c list_for_each_entry(cs, &watchdog_list, wd_list)
cs 332 kernel/time/clocksource.c cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
cs 340 kernel/time/clocksource.c static void clocksource_enqueue_watchdog(struct clocksource *cs)
cs 342 kernel/time/clocksource.c INIT_LIST_HEAD(&cs->wd_list);
cs 344 kernel/time/clocksource.c if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
cs 346 kernel/time/clocksource.c list_add(&cs->wd_list, &watchdog_list);
cs 347 kernel/time/clocksource.c cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
cs 350 kernel/time/clocksource.c if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
cs 351 kernel/time/clocksource.c cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
cs 357 kernel/time/clocksource.c struct clocksource *cs, *old_wd;
cs 366 kernel/time/clocksource.c list_for_each_entry(cs, &clocksource_list, list) {
cs 368 kernel/time/clocksource.c if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
cs 372 kernel/time/clocksource.c if (fallback && cs == old_wd)
cs 376 kernel/time/clocksource.c if (!watchdog || cs->rating > watchdog->rating)
cs 377 kernel/time/clocksource.c watchdog = cs;
cs 392 kernel/time/clocksource.c static void clocksource_dequeue_watchdog(struct clocksource *cs)
cs 394 kernel/time/clocksource.c if (cs != watchdog) {
cs 395 kernel/time/clocksource.c if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
cs 397 kernel/time/clocksource.c list_del_init(&cs->wd_list);
cs 406 kernel/time/clocksource.c struct clocksource *cs, *tmp;
cs 411 kernel/time/clocksource.c list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
cs 412 kernel/time/clocksource.c if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
cs 413 kernel/time/clocksource.c list_del_init(&cs->wd_list);
cs 414 kernel/time/clocksource.c __clocksource_change_rating(cs, 0);
cs 417 kernel/time/clocksource.c if (cs->flags & CLOCK_SOURCE_RESELECT) {
cs 418 kernel/time/clocksource.c cs->flags &= ~CLOCK_SOURCE_RESELECT;
cs 438 kernel/time/clocksource.c static bool clocksource_is_watchdog(struct clocksource *cs)
cs 440 kernel/time/clocksource.c return cs == watchdog;
cs 445 kernel/time/clocksource.c static void clocksource_enqueue_watchdog(struct clocksource *cs)
cs 447 kernel/time/clocksource.c if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
cs 448 kernel/time/clocksource.c cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
cs 452 kernel/time/clocksource.c static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
cs 455 kernel/time/clocksource.c static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
cs 456 kernel/time/clocksource.c void clocksource_mark_unstable(struct clocksource *cs) { }
cs 463 kernel/time/clocksource.c static bool clocksource_is_suspend(struct clocksource *cs)
cs 465 kernel/time/clocksource.c return cs == suspend_clocksource;
cs 468 kernel/time/clocksource.c static void __clocksource_suspend_select(struct clocksource *cs)
cs 473 kernel/time/clocksource.c if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
cs 481 kernel/time/clocksource.c if (cs->suspend || cs->resume) {
cs 483 kernel/time/clocksource.c cs->name);
cs 487 kernel/time/clocksource.c if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
cs 488 kernel/time/clocksource.c suspend_clocksource = cs;
cs 497 kernel/time/clocksource.c struct clocksource *cs, *old_suspend;
cs 503 kernel/time/clocksource.c list_for_each_entry(cs, &clocksource_list, list) {
cs 505 kernel/time/clocksource.c if (fallback && cs == old_suspend)
cs 508 kernel/time/clocksource.c __clocksource_suspend_select(cs);
cs 525 kernel/time/clocksource.c void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
cs 535 kernel/time/clocksource.c if (clocksource_is_suspend(cs)) {
cs 563 kernel/time/clocksource.c u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
cs 575 kernel/time/clocksource.c if (clocksource_is_suspend(cs))
cs 591 kernel/time/clocksource.c if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
cs 602 kernel/time/clocksource.c struct clocksource *cs;
cs 604 kernel/time/clocksource.c list_for_each_entry_reverse(cs, &clocksource_list, list)
cs 605 kernel/time/clocksource.c if (cs->suspend)
cs 606 kernel/time/clocksource.c cs->suspend(cs);
cs 614 kernel/time/clocksource.c struct clocksource *cs;
cs 616 kernel/time/clocksource.c list_for_each_entry(cs, &clocksource_list, list)
cs 617 kernel/time/clocksource.c if (cs->resume)
cs 618 kernel/time/clocksource.c cs->resume(cs);
cs 640 kernel/time/clocksource.c static u32 clocksource_max_adjustment(struct clocksource *cs)
cs 646 kernel/time/clocksource.c ret = (u64)cs->mult * 11;
cs 701 kernel/time/clocksource.c static inline void clocksource_update_max_deferment(struct clocksource *cs)
cs 703 kernel/time/clocksource.c cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
cs 704 kernel/time/clocksource.c cs->maxadj, cs->mask,
cs 705 kernel/time/clocksource.c &cs->max_cycles);
cs 712 kernel/time/clocksource.c struct clocksource *cs;
cs 722 kernel/time/clocksource.c list_for_each_entry(cs, &clocksource_list, list) {
cs 723 kernel/time/clocksource.c if (skipcur && cs == curr_clocksource)
cs 725 kernel/time/clocksource.c if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
cs 727 kernel/time/clocksource.c return cs;
cs 735 kernel/time/clocksource.c struct clocksource *best, *cs;
cs 746 kernel/time/clocksource.c list_for_each_entry(cs, &clocksource_list, list) {
cs 747 kernel/time/clocksource.c if (skipcur && cs == curr_clocksource)
cs 749 kernel/time/clocksource.c if (strcmp(cs->name, override_name) != 0)
cs 756 kernel/time/clocksource.c if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
cs 758 kernel/time/clocksource.c if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
cs 760 kernel/time/clocksource.c cs->name);
cs 768 kernel/time/clocksource.c cs->name);
cs 772 kernel/time/clocksource.c best = cs;
cs 832 kernel/time/clocksource.c static void clocksource_enqueue(struct clocksource *cs)
cs 839 kernel/time/clocksource.c if (tmp->rating < cs->rating)
cs 843 kernel/time/clocksource.c list_add(&cs->list, entry);
cs 858 kernel/time/clocksource.c void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
cs 876 kernel/time/clocksource.c sec = cs->mask;
cs 881 kernel/time/clocksource.c else if (sec > 600 && cs->mask > UINT_MAX)
cs 884 kernel/time/clocksource.c clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
cs 891 kernel/time/clocksource.c cs->maxadj = clocksource_max_adjustment(cs);
cs 892 kernel/time/clocksource.c while (freq && ((cs->mult + cs->maxadj < cs->mult)
cs 893 kernel/time/clocksource.c || (cs->mult - cs->maxadj > cs->mult))) {
cs 894 kernel/time/clocksource.c cs->mult >>= 1;
cs 895 kernel/time/clocksource.c cs->shift--;
cs 896 kernel/time/clocksource.c cs->maxadj = clocksource_max_adjustment(cs);
cs 903 kernel/time/clocksource.c WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
cs 905 kernel/time/clocksource.c cs->name);
cs 907 kernel/time/clocksource.c clocksource_update_max_deferment(cs);
cs 910 kernel/time/clocksource.c cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
cs 925 kernel/time/clocksource.c int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
cs 929 kernel/time/clocksource.c clocksource_arch_init(cs);
cs 932 kernel/time/clocksource.c __clocksource_update_freq_scale(cs, scale, freq);
cs 938 kernel/time/clocksource.c clocksource_enqueue(cs);
cs 939 kernel/time/clocksource.c clocksource_enqueue_watchdog(cs);
cs 944 kernel/time/clocksource.c __clocksource_suspend_select(cs);
cs 950 kernel/time/clocksource.c static void __clocksource_change_rating(struct clocksource *cs, int rating)
cs 952 kernel/time/clocksource.c list_del(&cs->list);
cs 953 kernel/time/clocksource.c cs->rating = rating;
cs 954 kernel/time/clocksource.c clocksource_enqueue(cs);
cs 962 kernel/time/clocksource.c void clocksource_change_rating(struct clocksource *cs, int rating)
cs 968 kernel/time/clocksource.c __clocksource_change_rating(cs, rating);
cs 981 kernel/time/clocksource.c static int clocksource_unbind(struct clocksource *cs)
cs 985 kernel/time/clocksource.c if (clocksource_is_watchdog(cs)) {
cs 988 kernel/time/clocksource.c if (clocksource_is_watchdog(cs))
cs 992 kernel/time/clocksource.c if (cs == curr_clocksource) {
cs 995 kernel/time/clocksource.c if (curr_clocksource == cs)
cs 999 kernel/time/clocksource.c if (clocksource_is_suspend(cs)) {
cs 1009 kernel/time/clocksource.c clocksource_dequeue_watchdog(cs);
cs 1010 kernel/time/clocksource.c list_del_init(&cs->list);
cs 1020 kernel/time/clocksource.c int clocksource_unregister(struct clocksource *cs)
cs 1025 kernel/time/clocksource.c if (!list_empty(&cs->list))
cs 1026 kernel/time/clocksource.c ret = clocksource_unbind(cs);
cs 1112 kernel/time/clocksource.c struct clocksource *cs;
cs 1122 kernel/time/clocksource.c list_for_each_entry(cs, &clocksource_list, list) {
cs 1123 kernel/time/clocksource.c if (strcmp(cs->name, name))
cs 1125 kernel/time/clocksource.c ret = clocksource_unbind(cs);
cs 35 kernel/time/jiffies.c static u64 jiffies_read(struct clocksource *cs)
cs 73 kernel/time/timekeeping.c static u64 dummy_clock_read(struct clocksource *cs)
cs 1151 kernel/time/timekeeping.c if (tk->tkr_mono.clock != system_counterval.cs)
cs 358 lib/string.c int strcmp(const char *cs, const char *ct)
cs 363 lib/string.c c1 = *cs++;
cs 382 lib/string.c int strncmp(const char *cs, const char *ct, size_t count)
cs 387 lib/string.c c1 = *cs++;
cs 608 lib/string.c char *strpbrk(const char *cs, const char *ct)
cs 612 lib/string.c for (sc1 = cs; *sc1 != '\0'; ++sc1) {
cs 879 lib/string.c __visible int memcmp(const void *cs, const void *ct, size_t count)
cs 884 lib/string.c for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
cs 189 net/bridge/netfilter/ebtables.c struct ebt_chainstack *cs;
cs 203 net/bridge/netfilter/ebtables.c cs = private->chainstack[smp_processor_id()];
cs 205 net/bridge/netfilter/ebtables.c cs = NULL;
cs 257 net/bridge/netfilter/ebtables.c i = cs[sp].n;
cs 258 net/bridge/netfilter/ebtables.c chaininfo = cs[sp].chaininfo;
cs 260 net/bridge/netfilter/ebtables.c point = cs[sp].e;
cs 274 net/bridge/netfilter/ebtables.c cs[sp].n = i + 1;
cs 275 net/bridge/netfilter/ebtables.c cs[sp].chaininfo = chaininfo;
cs 276 net/bridge/netfilter/ebtables.c cs[sp].e = ebt_next_entry(point);
cs 546 net/bridge/netfilter/ebtables.c struct ebt_chainstack cs;
cs 571 net/bridge/netfilter/ebtables.c udc[*n].cs.chaininfo = (struct ebt_entries *)e;
cs 573 net/bridge/netfilter/ebtables.c udc[*n].cs.n = 0;
cs 684 net/bridge/netfilter/ebtables.c if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
cs 772 net/bridge/netfilter/ebtables.c e = cl_s[chain_nr].cs.e;
cs 775 net/bridge/netfilter/ebtables.c cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
cs 778 net/bridge/netfilter/ebtables.c pos = cl_s[chain_nr].cs.n;
cs 780 net/bridge/netfilter/ebtables.c cl_s[chain_nr].cs.n = 0;
cs 797 net/bridge/netfilter/ebtables.c if (hlp2 == cl_s[i].cs.chaininfo)
cs 803 net/bridge/netfilter/ebtables.c if (cl_s[i].cs.n)
cs 809 net/bridge/netfilter/ebtables.c cl_s[i].cs.n = pos + 1;
cs 811 net/bridge/netfilter/ebtables.c cl_s[i].cs.e = ebt_next_entry(e);
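The kernel/time/clocksource.c hits above operate on drivers that register a struct clocksource whose ->read callback returns a free-running counter; __clocksource_register_scale() then derives mult/shift and wires the source into the watchdog. A minimal sketch of such a registration follows; the "foo" names, the MMIO base, and the 24 MHz rate are invented for illustration, and a real driver would ioremap() the counter before registering:

	#include <linux/clocksource.h>
	#include <linux/io.h>
	#include <linux/init.h>

	static void __iomem *foo_counter_base;	/* assumed already mapped */

	/* ->read must be monotonic modulo ->mask; it takes the clocksource itself. */
	static u64 foo_clocksource_read(struct clocksource *cs)
	{
		return (u64)readl_relaxed(foo_counter_base);
	}

	static struct clocksource foo_clocksource = {
		.name	= "foo-timer",
		.rating	= 300,
		.read	= foo_clocksource_read,
		.mask	= CLOCKSOURCE_MASK(32),		/* 32-bit counter */
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	};

	static int __init foo_timer_init(void)
	{
		/* Wrapper around __clocksource_register_scale(cs, 1, hz). */
		return clocksource_register_hz(&foo_clocksource, 24000000);
	}

The rating field is what clocksource_enqueue() sorts on above, so a higher-rated source automatically becomes a candidate for curr_clocksource at selection time.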
cs 13 net/ceph/string_table.c struct ceph_string *cs, *exist;
cs 40 net/ceph/string_table.c cs = kmalloc(sizeof(*cs) + len + 1, GFP_NOFS);
cs 41 net/ceph/string_table.c if (!cs)
cs 44 net/ceph/string_table.c kref_init(&cs->kref);
cs 45 net/ceph/string_table.c cs->len = len;
cs 46 net/ceph/string_table.c memcpy(cs->str, str, len);
cs 47 net/ceph/string_table.c cs->str[len] = 0;
cs 68 net/ceph/string_table.c rb_link_node(&cs->node, parent, p);
cs 69 net/ceph/string_table.c rb_insert_color(&cs->node, &string_tree);
cs 80 net/ceph/string_table.c kfree(cs);
cs 81 net/ceph/string_table.c cs = exist;
cs 84 net/ceph/string_table.c return cs;
cs 90 net/ceph/string_table.c struct ceph_string *cs = container_of(ref, struct ceph_string, kref);
cs 93 net/ceph/string_table.c if (!RB_EMPTY_NODE(&cs->node)) {
cs 94 net/ceph/string_table.c rb_erase(&cs->node, &string_tree);
cs 95 net/ceph/string_table.c RB_CLEAR_NODE(&cs->node);
cs 99 net/ceph/string_table.c kfree_rcu(cs, rcu);
cs 31 net/core/netclassid_cgroup.c struct cgroup_cls_state *cs;
cs 33 net/core/netclassid_cgroup.c cs = kzalloc(sizeof(*cs), GFP_KERNEL);
cs 34 net/core/netclassid_cgroup.c if (!cs)
cs 37 net/core/netclassid_cgroup.c return &cs->css;
cs 42 net/core/netclassid_cgroup.c struct cgroup_cls_state *cs = css_cls_state(css);
cs 46 net/core/netclassid_cgroup.c cs->classid = parent->classid;
cs 121 net/core/netclassid_cgroup.c struct cgroup_cls_state *cs = css_cls_state(css);
cs 127 net/core/netclassid_cgroup.c cs->classid = (u32)value;
cs 131 net/core/netclassid_cgroup.c update_classid_task(p, cs->classid);
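net/ceph/string_table.c above follows the standard kref lifecycle: kref_init() on creation, and a release callback that container_of()s from the kref back to the outer object before freeing it. A compact sketch of that pattern, with my_obj as a hypothetical stand-in for ceph_string and the rb-tree bookkeeping omitted:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct my_obj {				/* stand-in for ceph_string */
		struct kref kref;
		char payload[16];
	};

	/* Called by kref_put() when the count reaches zero. */
	static void my_obj_release(struct kref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, kref);

		kfree(obj);
	}

	static struct my_obj *my_obj_create(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			kref_init(&obj->kref);	/* refcount starts at 1 */
		return obj;
	}

Callers then pair kref_get(&obj->kref) with kref_put(&obj->kref, my_obj_release); the real code additionally defers the free with kfree_rcu() because lookups run under RCU.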
cs 392 net/mac80211/cfg.c const struct ieee80211_cipher_scheme *cs = NULL;
cs 419 net/mac80211/cfg.c cs = ieee80211_cs_get(local, params->cipher, sdata->vif.type);
cs 425 net/mac80211/cfg.c cs);
cs 491 net/mac80211/cfg.c sta->cipher_scheme = cs;
cs 2210 net/mac80211/ieee80211_i.h bool ieee80211_cs_valid(const struct ieee80211_cipher_scheme *cs);
cs 2211 net/mac80211/ieee80211_i.h bool ieee80211_cs_list_valid(const struct ieee80211_cipher_scheme *cs, int n);
cs 502 net/mac80211/key.c const struct ieee80211_cipher_scheme *cs)
cs 644 net/mac80211/key.c if (cs) {
cs 645 net/mac80211/key.c if (seq_len && seq_len != cs->pn_len) {
cs 650 net/mac80211/key.c key->conf.iv_len = cs->hdr_len;
cs 651 net/mac80211/key.c key->conf.icv_len = cs->mic_len;
cs 141 net/mac80211/key.h const struct ieee80211_cipher_scheme *cs);
cs 802 net/mac80211/main.c const struct ieee80211_cipher_scheme *cs;
cs 804 net/mac80211/main.c cs = local->hw.cipher_schemes;
cs 847 net/mac80211/main.c suites[w++] = cs[r].cipher;
cs 848 net/mac80211/main.c if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) {
cs 1006 net/mac80211/rx.c const struct ieee80211_cipher_scheme *cs)
cs 1019 net/mac80211/rx.c if (cs) {
cs 1020 net/mac80211/rx.c minlen = hdrlen + cs->hdr_len;
cs 1021 net/mac80211/rx.c key_idx_off = hdrlen + cs->key_idx_off;
cs 1022 net/mac80211/rx.c key_idx_shift = cs->key_idx_shift;
cs 1035 net/mac80211/rx.c if (cs)
cs 1036 net/mac80211/rx.c keyid &= cs->key_idx_mask;
cs 1883 net/mac80211/rx.c const struct ieee80211_cipher_scheme *cs = NULL;
cs 1921 net/mac80211/rx.c cs = rx->sta->cipher_scheme;
cs 1922 net/mac80211/rx.c keyid = ieee80211_get_keyid(rx->skb, cs);
cs 2008 net/mac80211/rx.c keyidx = ieee80211_get_keyid(rx->skb, cs);
cs 3555 net/mac80211/util.c bool ieee80211_cs_valid(const struct ieee80211_cipher_scheme *cs)
cs 3557 net/mac80211/util.c return !(cs == NULL || cs->cipher == 0 ||
cs 3558 net/mac80211/util.c cs->hdr_len < cs->pn_len + cs->pn_off ||
cs 3559 net/mac80211/util.c cs->hdr_len <= cs->key_idx_off ||
cs 3560 net/mac80211/util.c cs->key_idx_shift > 7 ||
cs 3561 net/mac80211/util.c cs->key_idx_mask == 0);
cs 3564 net/mac80211/util.c bool ieee80211_cs_list_valid(const struct ieee80211_cipher_scheme *cs, int n)
cs 3569 net/mac80211/util.c WARN_ON((NUM_NL80211_IFTYPES / 8 + 1) > sizeof(cs[0].iftype));
cs 3572 net/mac80211/util.c if (!ieee80211_cs_valid(&cs[i]))
cs 3585 net/mac80211/util.c const struct ieee80211_cipher_scheme *cs = NULL;
cs 3589 net/mac80211/util.c cs = &l[i];
cs 3594 net/mac80211/util.c if (!cs || !(cs->iftype & BIT(iftype)))
cs 3597 net/mac80211/util.c return cs;
cs 3604 net/mac80211/util.c const struct ieee80211_cipher_scheme *cs;
cs 3609 net/mac80211/util.c cs = ieee80211_cs_get(local, crypto->ciphers_pairwise[i],
cs 3612 net/mac80211/util.c if (cs && headroom < cs->hdr_len)
cs 3613 net/mac80211/util.c headroom = cs->hdr_len;
cs 3616 net/mac80211/util.c cs = ieee80211_cs_get(local, crypto->cipher_group, iftype);
cs 3617 net/mac80211/util.c if (cs && headroom < cs->hdr_len)
cs 3618 net/mac80211/util.c headroom = cs->hdr_len;
cs 843 net/mac80211/wpa.c const struct ieee80211_cipher_scheme *cs = NULL;
cs 858 net/mac80211/wpa.c cs = rx->sta->cipher_scheme;
cs 860 net/mac80211/wpa.c data_len = rx->skb->len - hdrlen - cs->hdr_len;
cs 876 net/mac80211/wpa.c skb_pn = rx->skb->data + hdrlen + cs->pn_off;
cs 878 net/mac80211/wpa.c if (ieee80211_crypto_cs_pn_compare(skb_pn, rx_pn, cs->pn_len) <= 0)
cs 881 net/mac80211/wpa.c memcpy(rx_pn, skb_pn, cs->pn_len);
cs 884 net/mac80211/wpa.c if (pskb_trim(rx->skb, rx->skb->len - cs->mic_len))
cs 887 net/mac80211/wpa.c memmove(rx->skb->data + cs->hdr_len, rx->skb->data, hdrlen);
cs 888 net/mac80211/wpa.c skb_pull(rx->skb, cs->hdr_len);
cs 489 scripts/kconfig/confdata.c struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
cs 494 scripts/kconfig/confdata.c if (cs->def[def].tri == yes) {
cs 496 scripts/kconfig/confdata.c cs->flags &= ~def_flags;
cs 500 scripts/kconfig/confdata.c if (cs->def[def].tri != no)
cs 502 scripts/kconfig/confdata.c cs->def[def].val = sym;
cs 505 scripts/kconfig/confdata.c cs->def[def].tri = EXPR_OR(cs->def[def].tri, sym->def[def].tri);
cs 811 scripts/kconfig/confdata.c struct symbol *cs;
cs 814 scripts/kconfig/confdata.c cs = prop_get_symbol(sym_get_choice_prop(sym));
cs 815 scripts/kconfig/confdata.c ds = sym_choice_default(cs);
cs 816 scripts/kconfig/confdata.c if (!sym_is_optional(cs) && sym == ds) {
cs 510 scripts/kconfig/symbol.c struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
cs 514 scripts/kconfig/symbol.c cs->def[S_DEF_USER].val = sym;
cs 515 scripts/kconfig/symbol.c cs->flags |= SYMBOL_DEF_USER;
cs 516 scripts/kconfig/symbol.c prop = sym_get_choice_prop(cs);
cs 13 sound/core/pcm_iec958.c u8 *cs, size_t len)
cs 69 sound/core/pcm_iec958.c memset(cs, 0, len);
cs 71 sound/core/pcm_iec958.c cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
cs 72 sound/core/pcm_iec958.c cs[1] = IEC958_AES1_CON_GENERAL;
cs 73 sound/core/pcm_iec958.c cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;
cs 74 sound/core/pcm_iec958.c cs[3] = IEC958_AES3_CON_CLOCK_1000PPM | fs;
cs 77 sound/core/pcm_iec958.c cs[4] = ws;
cs 95 sound/core/pcm_iec958.c int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
cs 100 sound/core/pcm_iec958.c cs, len);
cs 118 sound/core/pcm_iec958.c u8 *cs, size_t len)
cs 121 sound/core/pcm_iec958.c cs, len);
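The sound/core/pcm_iec958.c and sound/soc/stm/stm32_sai_sub.c hits show the same recipe for the first bytes of an IEC958 (S/PDIF) consumer-mode channel-status block. A sketch of that assembly follows; build_iec958_consumer_status is a hypothetical helper, and the fixed 48 kHz sample-rate byte is an assumption (the real snd_pcm_create_iec958_consumer() derives it from the runtime's rate):

	#include <linux/string.h>
	#include <linux/types.h>
	#include <sound/asoundef.h>

	/* Fill the first bytes of a consumer-mode channel-status block. */
	static void build_iec958_consumer_status(u8 *cs, size_t len)
	{
		memset(cs, 0, len);

		cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
		cs[1] = IEC958_AES1_CON_GENERAL;
		cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;
		/* Byte 3 carries the clock accuracy plus the sample-rate code. */
		cs[3] = IEC958_AES3_CON_CLOCK_1000PPM | IEC958_AES3_CON_FS_48000;
	}

Byte 4 (word length) is what the `cs[4] = ws;` hit above fills in once the sample format is known, which is why the hw_params variant of the helper exists.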
cs 633 sound/pci/emu10k1/emupcm.c unsigned int voice, stereo, i, ccis, cra = 64, cs, sample;
cs 643 sound/pci/emu10k1/emupcm.c cs = (sample == 0) ? (32-ccis) : (64-ccis+1) >> 1;
cs 644 sound/pci/emu10k1/emupcm.c if (cs > 16) cs = 16;
cs 645 sound/pci/emu10k1/emupcm.c for (i = 0; i < cs; i++) {
cs 473 sound/pci/ice1712/aureon.c static void aureon_spi_write(struct snd_ice1712 *ice, unsigned int cs, unsigned int data, int bits)
cs 495 sound/pci/ice1712/aureon.c tmp &= ~cs;
cs 515 sound/pci/ice1712/aureon.c tmp |= cs;
cs 526 sound/pci/ice1712/aureon.c static void aureon_spi_read(struct snd_ice1712 *ice, unsigned int cs,
cs 534 sound/pci/ice1712/aureon.c tmp &= ~cs;
cs 574 sound/pci/ice1712/aureon.c tmp |= cs;
cs 197 sound/pci/ice1712/phase.c static void phase28_spi_write(struct snd_ice1712 *ice, unsigned int cs,
cs 208 sound/pci/ice1712/phase.c tmp &= ~cs;
cs 228 sound/pci/ice1712/phase.c tmp |= cs;
cs 89 sound/soc/kirkwood/kirkwood-dma.c const struct mbus_dram_window *cs = dram->cs + i;
cs 90 sound/soc/kirkwood/kirkwood-dma.c if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) {
cs 91 sound/soc/kirkwood/kirkwood-dma.c writel(cs->base & 0xffff0000,
cs 93 sound/soc/kirkwood/kirkwood-dma.c writel(((cs->size - 1) & 0xffff0000) |
cs 94 sound/soc/kirkwood/kirkwood-dma.c (cs->mbus_attr << 8) |
cs 185 sound/soc/meson/axg-spdifout.c u8 cs[4];
cs 188 sound/soc/meson/axg-spdifout.c ret = snd_pcm_create_iec958_consumer_hw_params(params, cs, 4);
cs 194 sound/soc/meson/axg-spdifout.c val = cs[0] | cs[1] << 8 | cs[2] << 16 | cs[3] << 24;
cs 936 sound/soc/stm/stm32_sai_sub.c unsigned char *cs = sai->iec958.status;
cs 938 sound/soc/stm/stm32_sai_sub.c cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
cs 939 sound/soc/stm/stm32_sai_sub.c cs[1] = IEC958_AES1_CON_GENERAL;
cs 940 sound/soc/stm/stm32_sai_sub.c cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;
cs 941 sound/soc/stm/stm32_sai_sub.c cs[3] = IEC958_AES3_CON_CLOCK_1000PPM | IEC958_AES3_CON_FS_NOTID;
cs 244 sound/soc/stm/stm32_spdifrx.c unsigned char cs[SPDIFRX_CS_BYTES_NB];
cs 279 sound/soc/stm/stm32_spdifrx.c spdifrx->cs[i] = (unsigned char)SPDIFRX_CSR_CSGET(*ptr);
cs 481 sound/soc/stm/stm32_spdifrx.c memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB);
cs 527 sound/soc/stm/stm32_spdifrx.c ucontrol->value.iec958.status[0] = spdifrx->cs[0];
cs 528 sound/soc/stm/stm32_spdifrx.c ucontrol->value.iec958.status[1] = spdifrx->cs[1];
cs 529 sound/soc/stm/stm32_spdifrx.c ucontrol->value.iec958.status[2] = spdifrx->cs[2];
cs 530 sound/soc/stm/stm32_spdifrx.c ucontrol->value.iec958.status[3] = spdifrx->cs[3];
cs 531 sound/soc/stm/stm32_spdifrx.c ucontrol->value.iec958.status[4] = spdifrx->cs[4];
cs 27 sound/usb/clock.c void *cs = NULL;
cs 29 sound/usb/clock.c while ((cs = snd_usb_find_csint_desc(iface->extra, iface->extralen,
cs 30 sound/usb/clock.c cs, type))) {
cs 31 sound/usb/clock.c if (validator(cs, id))
cs 32 sound/usb/clock.c return cs;
cs 40 sound/usb/clock.c struct uac_clock_source_descriptor *cs = p;
cs 41 sound/usb/clock.c return cs->bClockID == id;
cs 46 sound/usb/clock.c struct uac3_clock_source_descriptor *cs = p;
cs 47 sound/usb/clock.c return cs->bClockID == id;
cs 52 sound/usb/clock.c struct uac_clock_selector_descriptor *cs = p;
cs 53 sound/usb/clock.c return cs->bClockID == id;
cs 58 sound/usb/clock.c struct uac3_clock_selector_descriptor *cs = p;
cs 59 sound/usb/clock.c return cs->bClockID == id;
cs 64 sound/usb/clock.c struct uac_clock_multiplier_descriptor *cs = p;
cs 65 sound/usb/clock.c return cs->bClockID == id;
cs 70 sound/usb/clock.c struct uac3_clock_multiplier_descriptor *cs = p;
cs 71 sound/usb/clock.c return cs->bClockID == id;
cs 151 tools/arch/x86/include/uapi/asm/kvm.h struct kvm_segment cs, ds, es, fs, gs, ss;
cs 10 tools/build/feature/test-pthread-attr-setaffinity-np.c cpu_set_t cs;
cs 13 tools/build/feature/test-pthread-attr-setaffinity-np.c CPU_ZERO(&cs);
cs 15 tools/build/feature/test-pthread-attr-setaffinity-np.c ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
cs 22 tools/perf/util/comm.c static struct comm_str *comm_str__get(struct comm_str *cs)
cs 24 tools/perf/util/comm.c if (cs && refcount_inc_not_zero(&cs->refcnt))
cs 25 tools/perf/util/comm.c return cs;
cs 30 tools/perf/util/comm.c static void comm_str__put(struct comm_str *cs)
cs 32 tools/perf/util/comm.c if (cs && refcount_dec_and_test(&cs->refcnt)) {
cs 34 tools/perf/util/comm.c rb_erase(&cs->rb_node, &comm_str_root);
cs 36 tools/perf/util/comm.c zfree(&cs->str);
cs 37 tools/perf/util/comm.c free(cs);
cs 43 tools/perf/util/comm.c struct comm_str *cs;
cs 45 tools/perf/util/comm.c cs = zalloc(sizeof(*cs));
cs 46 tools/perf/util/comm.c if (!cs)
cs 49 tools/perf/util/comm.c cs->str = strdup(str);
cs 50 tools/perf/util/comm.c if (!cs->str) {
cs 51 tools/perf/util/comm.c free(cs);
cs 55 tools/perf/util/comm.c refcount_set(&cs->refcnt, 1);
cs 57 tools/perf/util/comm.c return cs;
cs 99 tools/perf/util/comm.c struct comm_str *cs;
cs 102 tools/perf/util/comm.c cs = __comm_str__findnew(str, root);
cs 105 tools/perf/util/comm.c return cs;
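tools/perf/util/comm.c above interns command strings behind a refcount_t: the get side only succeeds via refcount_inc_not_zero(), so a lookup racing with the final put cannot resurrect a dying object. A trimmed userspace sketch of that get/put pair follows, with the rb-tree bookkeeping elided; the refcount.h API shown here is the one perf carries in tools/include:

	#include <linux/refcount.h>	/* tools/include copy of the kernel API */
	#include <stdlib.h>

	struct comm_str {
		char *str;
		refcount_t refcnt;	/* set to 1 with refcount_set() at creation */
	};

	static struct comm_str *comm_str__get(struct comm_str *cs)
	{
		/* Fails (returns NULL) if the count already dropped to zero. */
		if (cs && refcount_inc_not_zero(&cs->refcnt))
			return cs;
		return NULL;
	}

	static void comm_str__put(struct comm_str *cs)
	{
		if (cs && refcount_dec_and_test(&cs->refcnt)) {
			free(cs->str);	/* tree removal elided from this sketch */
			free(cs);
		}
	}

The saturating refcount_t is preferred over a bare atomic here because overflow and use-after-free bugs degrade into warnings instead of silent corruption.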
cs 663 tools/perf/util/data-convert-bt.c static int ctf_stream__flush(struct ctf_stream *cs)
cs 667 tools/perf/util/data-convert-bt.c if (cs) {
cs 668 tools/perf/util/data-convert-bt.c err = bt_ctf_stream_flush(cs->stream);
cs 670 tools/perf/util/data-convert-bt.c pr_err("CTF stream %d flush failed\n", cs->cpu);
cs 673 tools/perf/util/data-convert-bt.c cs->cpu, cs->count);
cs 675 tools/perf/util/data-convert-bt.c cs->count = 0;
cs 683 tools/perf/util/data-convert-bt.c struct ctf_stream *cs;
cs 689 tools/perf/util/data-convert-bt.c cs = zalloc(sizeof(*cs));
cs 690 tools/perf/util/data-convert-bt.c if (!cs) {
cs 722 tools/perf/util/data-convert-bt.c cs->cpu = cpu;
cs 723 tools/perf/util/data-convert-bt.c cs->stream = stream;
cs 724 tools/perf/util/data-convert-bt.c return cs;
cs 732 tools/perf/util/data-convert-bt.c free(cs);
cs 736 tools/perf/util/data-convert-bt.c static void ctf_stream__delete(struct ctf_stream *cs)
cs 738 tools/perf/util/data-convert-bt.c if (cs) {
cs 739 tools/perf/util/data-convert-bt.c bt_ctf_stream_put(cs->stream);
cs 740 tools/perf/util/data-convert-bt.c free(cs);
cs 746 tools/perf/util/data-convert-bt.c struct ctf_stream *cs = cw->stream[cpu];
cs 748 tools/perf/util/data-convert-bt.c if (!cs) {
cs 749 tools/perf/util/data-convert-bt.c cs = ctf_stream__create(cw, cpu);
cs 750 tools/perf/util/data-convert-bt.c cw->stream[cpu] = cs;
cs 753 tools/perf/util/data-convert-bt.c return cs;
cs 781 tools/perf/util/data-convert-bt.c static bool is_flush_needed(struct ctf_stream *cs)
cs 783 tools/perf/util/data-convert-bt.c return cs->count >= STREAM_FLUSH_COUNT;
cs 795 tools/perf/util/data-convert-bt.c struct ctf_stream *cs;
cs 844 tools/perf/util/data-convert-bt.c cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
cs 845 tools/perf/util/data-convert-bt.c if (cs) {
cs 846 tools/perf/util/data-convert-bt.c if (is_flush_needed(cs))
cs 847 tools/perf/util/data-convert-bt.c ctf_stream__flush(cs);
cs 849 tools/perf/util/data-convert-bt.c cs->count++;
cs 850 tools/perf/util/data-convert-bt.c bt_ctf_stream_append_event(cs->stream, event);
cs 854 tools/perf/util/data-convert-bt.c return cs ? 0 : -1;
cs 874 tools/perf/util/data-convert-bt.c struct ctf_stream *cs; \
cs 887 tools/perf/util/data-convert-bt.c cs = ctf_stream(cw, 0); \
cs 888 tools/perf/util/data-convert-bt.c if (cs) { \
cs 889 tools/perf/util/data-convert-bt.c if (is_flush_needed(cs)) \
cs 890 tools/perf/util/data-convert-bt.c ctf_stream__flush(cs); \
cs 892 tools/perf/util/data-convert-bt.c cs->count++; \
cs 893 tools/perf/util/data-convert-bt.c bt_ctf_stream_append_event(cs->stream, event); \
cs 137 tools/testing/selftests/kvm/include/x86_64/processor.h uint16_t cs;
cs 140 tools/testing/selftests/kvm/include/x86_64/processor.h : /* output */ [cs]"=rm"(cs));
cs 141 tools/testing/selftests/kvm/include/x86_64/processor.h return cs;
cs 192 tools/testing/selftests/kvm/lib/x86_64/processor.c segment_dump(stream, &sregs->cs, indent + 2);
cs 630 tools/testing/selftests/kvm/lib/x86_64/processor.c kvm_seg_set_kernel_code_64bit(vm, 0x8, &sregs.cs);
cs 265 tools/testing/selftests/x86/entry_from_vm86.c v86.regs.cs = load_addr / 16;
cs 273 tools/testing/selftests/x86/entry_from_vm86.c assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */
cs 332 tools/testing/selftests/x86/entry_from_vm86.c v86.regs.cs = 0;
cs 304 tools/testing/selftests/x86/sigreturn.c unsigned short cs, gs, fs, ss;
cs 316 tools/testing/selftests/x86/sigreturn.c return &sels->cs;
cs 338 tools/testing/selftests/x86/sigreturn.c int cs_bitness(unsigned short cs)
cs 346 tools/testing/selftests/x86/sigreturn.c : [cs] "r" (cs));
cs 371 tools/testing/selftests/x86/sigreturn.c bool is_valid_ss(unsigned short cs)
cs 379 tools/testing/selftests/x86/sigreturn.c : [cs] "r" (cs));
cs 580 tools/testing/selftests/x86/sigreturn.c int cs = find_cs(cs_bits);
cs 581 tools/testing/selftests/x86/sigreturn.c if (cs == -1) {
cs 602 tools/testing/selftests/x86/sigreturn.c sig_cs = cs;
cs 665 tools/testing/selftests/x86/sigreturn.c if (req_sels->cs != res_sels->cs) {
cs 667 tools/testing/selftests/x86/sigreturn.c req_sels->cs, res_sels->cs);
cs 706 tools/testing/selftests/x86/sigreturn.c int cs = force_cs == -1 ? find_cs(cs_bits) : force_cs;
cs 707 tools/testing/selftests/x86/sigreturn.c if (cs == -1)
cs 710 tools/testing/selftests/x86/sigreturn.c sig_cs = cs;
cs 379 tools/testing/selftests/x86/test_syscall_vdso.c int cs;
cs 383 tools/testing/selftests/x86/test_syscall_vdso.c : "=a" (cs)
cs 385 tools/testing/selftests/x86/test_syscall_vdso.c kernel_is_64bit = (cs == 0x23);
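The x86 selftests at the end read the CS selector directly from userspace to infer the environment: test_syscall_vdso.c treats 0x23, the 32-bit user code-segment selector in x86 Linux's GDT layout, as evidence of a 64-bit kernel. A standalone sketch of that probe follows; it is x86/GCC-specific, and the 0x23 interpretation is an assumption tied to that GDT layout rather than a portable guarantee:

	#include <stdio.h>

	int main(void)
	{
		unsigned short cs;

		/* Read the current code-segment selector. */
		asm volatile ("mov %%cs, %0" : "=r" (cs));

		/* 32-bit user code on a 64-bit x86 Linux kernel runs with CS == 0x23. */
		printf("cs = %#hx%s\n", cs,
		       cs == 0x23 ? " (32-bit task on a 64-bit kernel)" : "");
		return 0;
	}

The same mov-from-CS idiom appears in the kvm selftest get-CS helper above; only the interpretation of the selector value differs between the tests.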