block             449 arch/arm/kernel/setup.c 	int block;
block             455 arch/arm/kernel/setup.c 	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
block             456 arch/arm/kernel/setup.c 	if (block >= 2)
block             458 arch/arm/kernel/setup.c 	if (block >= 1)
block             462 arch/arm/kernel/setup.c 	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
block             463 arch/arm/kernel/setup.c 	if (block >= 5)
block             469 arch/arm/kernel/setup.c 	block = cpuid_feature_extract_field(isar5, 4);
block             470 arch/arm/kernel/setup.c 	if (block >= 2)
block             472 arch/arm/kernel/setup.c 	if (block >= 1)
block             475 arch/arm/kernel/setup.c 	block = cpuid_feature_extract_field(isar5, 8);
block             476 arch/arm/kernel/setup.c 	if (block >= 1)
block             479 arch/arm/kernel/setup.c 	block = cpuid_feature_extract_field(isar5, 12);
block             480 arch/arm/kernel/setup.c 	if (block >= 1)
block             483 arch/arm/kernel/setup.c 	block = cpuid_feature_extract_field(isar5, 16);
block             484 arch/arm/kernel/setup.c 	if (block >= 1)
block             513 arch/arm/mach-cns3xxx/cns3xxx.h void cns3xxx_pwr_soft_rst(unsigned int block);
block             514 arch/arm/mach-cns3xxx/cns3xxx.h void cns3xxx_pwr_clk_en(unsigned int block);
block              15 arch/arm/mach-cns3xxx/pm.c void cns3xxx_pwr_clk_en(unsigned int block)
block              19 arch/arm/mach-cns3xxx/pm.c 	reg |= (block & PM_CLK_GATE_REG_MASK);
block              24 arch/arm/mach-cns3xxx/pm.c void cns3xxx_pwr_clk_dis(unsigned int block)
block              28 arch/arm/mach-cns3xxx/pm.c 	reg &= ~(block & PM_CLK_GATE_REG_MASK);
block              33 arch/arm/mach-cns3xxx/pm.c void cns3xxx_pwr_power_up(unsigned int block)
block              37 arch/arm/mach-cns3xxx/pm.c 	reg &= ~(block & CNS3XXX_PWR_PLL_ALL);
block              45 arch/arm/mach-cns3xxx/pm.c void cns3xxx_pwr_power_down(unsigned int block)
block              50 arch/arm/mach-cns3xxx/pm.c 	reg |= (block & CNS3XXX_PWR_PLL_ALL);
block              55 arch/arm/mach-cns3xxx/pm.c static void cns3xxx_pwr_soft_rst_force(unsigned int block)
block              63 arch/arm/mach-cns3xxx/pm.c 	if (block & 0x30000001) {
block              64 arch/arm/mach-cns3xxx/pm.c 		reg &= ~(block & PM_SOFT_RST_REG_MASK);
block              66 arch/arm/mach-cns3xxx/pm.c 		reg &= ~(block & PM_SOFT_RST_REG_MASK);
block              68 arch/arm/mach-cns3xxx/pm.c 		reg |= (block & PM_SOFT_RST_REG_MASK);
block              74 arch/arm/mach-cns3xxx/pm.c void cns3xxx_pwr_soft_rst(unsigned int block)
block              78 arch/arm/mach-cns3xxx/pm.c 	if (soft_reset & block) {
block              82 arch/arm/mach-cns3xxx/pm.c 		soft_reset |= block;
block              84 arch/arm/mach-cns3xxx/pm.c 	cns3xxx_pwr_soft_rst_force(block);
block              13 arch/arm/mach-cns3xxx/pm.h void cns3xxx_pwr_clk_en(unsigned int block);
block              14 arch/arm/mach-cns3xxx/pm.h void cns3xxx_pwr_clk_dis(unsigned int block);
block              15 arch/arm/mach-cns3xxx/pm.h void cns3xxx_pwr_power_up(unsigned int block);
block              16 arch/arm/mach-cns3xxx/pm.h void cns3xxx_pwr_power_down(unsigned int block);
block             267 arch/ia64/kernel/perfmon.c 	unsigned int block:1;		/* when 1, task will block on user notifications */
block             360 arch/ia64/kernel/perfmon.c #define ctx_fl_block		ctx_flags.block
block             265 arch/mips/alchemy/common/usb.c static inline int au1300_usb_control(int block, int enable)
block             271 arch/mips/alchemy/common/usb.c 	switch (block) {
block             360 arch/mips/alchemy/common/usb.c static inline int au1200_usb_control(int block, int enable)
block             365 arch/mips/alchemy/common/usb.c 	switch (block) {
block             458 arch/mips/alchemy/common/usb.c static inline int au1000_usb_control(int block, int enable, unsigned long rb,
block             463 arch/mips/alchemy/common/usb.c 	switch (block) {
block             478 arch/mips/alchemy/common/usb.c int alchemy_usb_control(int block, int enable)
block             488 arch/mips/alchemy/common/usb.c 		ret = au1000_usb_control(block, enable,
block             492 arch/mips/alchemy/common/usb.c 		ret = au1000_usb_control(block, enable,
block             496 arch/mips/alchemy/common/usb.c 		ret = au1200_usb_control(block, enable);
block             499 arch/mips/alchemy/common/usb.c 		ret = au1300_usb_control(block, enable);
block              23 arch/mips/alchemy/common/vss.c static inline void __enable_block(int block)
block              25 arch/mips/alchemy/common/vss.c 	void __iomem *base = (void __iomem *)VSS_ADDR(block);
block              54 arch/mips/alchemy/common/vss.c static inline void __disable_block(int block)
block              56 arch/mips/alchemy/common/vss.c 	void __iomem *base = (void __iomem *)VSS_ADDR(block);
block              70 arch/mips/alchemy/common/vss.c void au1300_vss_block_control(int block, int enable)
block              80 arch/mips/alchemy/common/vss.c 		__enable_block(block);
block              82 arch/mips/alchemy/common/vss.c 		__disable_block(block);
block              56 arch/mips/cavium-octeon/crypto/octeon-md5.c 	const u64 *block = _block;
block              58 arch/mips/cavium-octeon/crypto/octeon-md5.c 	write_octeon_64bit_block_dword(block[0], 0);
block              59 arch/mips/cavium-octeon/crypto/octeon-md5.c 	write_octeon_64bit_block_dword(block[1], 1);
block              60 arch/mips/cavium-octeon/crypto/octeon-md5.c 	write_octeon_64bit_block_dword(block[2], 2);
block              61 arch/mips/cavium-octeon/crypto/octeon-md5.c 	write_octeon_64bit_block_dword(block[3], 3);
block              62 arch/mips/cavium-octeon/crypto/octeon-md5.c 	write_octeon_64bit_block_dword(block[4], 4);
block              63 arch/mips/cavium-octeon/crypto/octeon-md5.c 	write_octeon_64bit_block_dword(block[5], 5);
block              64 arch/mips/cavium-octeon/crypto/octeon-md5.c 	write_octeon_64bit_block_dword(block[6], 6);
block              65 arch/mips/cavium-octeon/crypto/octeon-md5.c 	octeon_md5_start(block[7]);
block              85 arch/mips/cavium-octeon/crypto/octeon-md5.c 	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
block              92 arch/mips/cavium-octeon/crypto/octeon-md5.c 		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
block              97 arch/mips/cavium-octeon/crypto/octeon-md5.c 	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
block             103 arch/mips/cavium-octeon/crypto/octeon-md5.c 	octeon_md5_transform(mctx->block);
block             107 arch/mips/cavium-octeon/crypto/octeon-md5.c 	while (len >= sizeof(mctx->block)) {
block             109 arch/mips/cavium-octeon/crypto/octeon-md5.c 		data += sizeof(mctx->block);
block             110 arch/mips/cavium-octeon/crypto/octeon-md5.c 		len -= sizeof(mctx->block);
block             116 arch/mips/cavium-octeon/crypto/octeon-md5.c 	memcpy(mctx->block, data, len);
block             125 arch/mips/cavium-octeon/crypto/octeon-md5.c 	char *p = (char *)mctx->block + offset;
block             137 arch/mips/cavium-octeon/crypto/octeon-md5.c 		octeon_md5_transform(mctx->block);
block             138 arch/mips/cavium-octeon/crypto/octeon-md5.c 		p = (char *)mctx->block;
block             143 arch/mips/cavium-octeon/crypto/octeon-md5.c 	mctx->block[14] = cpu_to_le32(mctx->byte_count << 3);
block             144 arch/mips/cavium-octeon/crypto/octeon-md5.c 	mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29);
block             145 arch/mips/cavium-octeon/crypto/octeon-md5.c 	octeon_md5_transform(mctx->block);
block              62 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	const u64 *block = _block;
block              64 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	write_octeon_64bit_block_dword(block[0], 0);
block              65 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	write_octeon_64bit_block_dword(block[1], 1);
block              66 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	write_octeon_64bit_block_dword(block[2], 2);
block              67 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	write_octeon_64bit_block_dword(block[3], 3);
block              68 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	write_octeon_64bit_block_dword(block[4], 4);
block              69 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	write_octeon_64bit_block_dword(block[5], 5);
block              70 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	write_octeon_64bit_block_dword(block[6], 6);
block              71 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	octeon_sha1_start(block[7]);
block              54 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	const u64 *block = _block;
block              56 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	write_octeon_64bit_block_dword(block[0], 0);
block              57 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	write_octeon_64bit_block_dword(block[1], 1);
block              58 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	write_octeon_64bit_block_dword(block[2], 2);
block              59 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	write_octeon_64bit_block_dword(block[3], 3);
block              60 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	write_octeon_64bit_block_dword(block[4], 4);
block              61 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	write_octeon_64bit_block_dword(block[5], 5);
block              62 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	write_octeon_64bit_block_dword(block[6], 6);
block              63 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	octeon_sha256_start(block[7]);
block              57 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	const u64 *block = _block;
block              59 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[0], 0);
block              60 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[1], 1);
block              61 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[2], 2);
block              62 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[3], 3);
block              63 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[4], 4);
block              64 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[5], 5);
block              65 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[6], 6);
block              66 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[7], 7);
block              67 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[8], 8);
block              68 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[9], 9);
block              69 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[10], 10);
block              70 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[11], 11);
block              71 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[12], 12);
block              72 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[13], 13);
block              73 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_block_sha512(block[14], 14);
block              74 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	octeon_sha512_start(block[15]);
block              51 arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
block              54 arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c 	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
block              55 arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c 		       cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
block             227 arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c 	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
block             232 arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
block             235 arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c 	cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
block             236 arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c 		       cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
block             268 arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c 	cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
block              42 arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block);
block              50 arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c void __cvmx_interrupt_asxx_enable(int block)
block              65 arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c 	csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
block              69 arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c 	cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
block            2588 arch/mips/cavium-octeon/octeon-irq.c 		int block = intsn >> 12;
block            2591 arch/mips/cavium-octeon/octeon-irq.c 		domain = ciu3_info->domain[block];
block            2592 arch/mips/cavium-octeon/octeon-irq.c 		if (ciu3_info->intsn2hw[block])
block            2593 arch/mips/cavium-octeon/octeon-irq.c 			hw = ciu3_info->intsn2hw[block](domain, intsn);
block            2972 arch/mips/cavium-octeon/octeon-irq.c struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
block            2977 arch/mips/cavium-octeon/octeon-irq.c 	return ciu3_info->domain[block];
block             794 arch/mips/include/asm/mach-au1x00/au1000.h int alchemy_usb_control(int block, int enable);
block             889 arch/mips/include/asm/mach-au1x00/au1000.h extern void au1300_vss_block_control(int block, int enable);
block              26 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h 	# a3 = address of boot descriptor block
block             425 arch/mips/include/asm/mips-cm.h 			       unsigned int vp, unsigned int block);
block             438 arch/mips/include/asm/mips-cm.h 				      unsigned int vp, unsigned int block) { }
block             452 arch/mips/include/asm/mips-cm.h static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
block             456 arch/mips/include/asm/mips-cm.h 	mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block);
block              58 arch/mips/include/asm/octeon/cvmx-asxx-defs.h void __cvmx_interrupt_asxx_enable(int block);
block             337 arch/mips/include/asm/octeon/cvmx-pcsx-defs.h void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
block             364 arch/mips/include/asm/octeon/octeon.h struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block);
block             257 arch/mips/kernel/mips-cm.c 			unsigned int vp, unsigned int block)
block             272 arch/mips/kernel/mips-cm.c 			val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK);
block             275 arch/mips/kernel/mips-cm.c 			WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
block             291 arch/mips/kernel/mips-cm.c 		WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
block              55 arch/powerpc/crypto/md5-glue.c 		memcpy((char *)sctx->block + offset, src, len);
block              60 arch/powerpc/crypto/md5-glue.c 		memcpy((char *)sctx->block + offset, src, avail);
block              61 arch/powerpc/crypto/md5-glue.c 		ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1);
block              72 arch/powerpc/crypto/md5-glue.c 	memcpy((char *)sctx->block, src, len);
block              80 arch/powerpc/crypto/md5-glue.c 	const u8 *src = (const u8 *)sctx->block;
block              83 arch/powerpc/crypto/md5-glue.c 	__le64 *pbits = (__le64 *)((char *)sctx->block + 56);
block              91 arch/powerpc/crypto/md5-glue.c 		p = (char *)sctx->block;
block             462 arch/powerpc/include/asm/kvm_book3s.h 	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
block             465 arch/powerpc/include/asm/kvm_book3s.h 	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
block             467 arch/powerpc/include/asm/kvm_book3s.h 	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
block              30 arch/powerpc/include/asm/rheap.h 	rh_block_t *block;
block              57 arch/powerpc/include/asm/rheap.h 		    rh_block_t * block);
block              47 arch/powerpc/lib/rheap.c 	rh_block_t *block, *blk;
block              57 arch/powerpc/lib/rheap.c 	block = kmalloc_array(max_blocks, sizeof(rh_block_t), GFP_ATOMIC);
block              58 arch/powerpc/lib/rheap.c 	if (block == NULL)
block              64 arch/powerpc/lib/rheap.c 		memcpy(block, info->block,
block              67 arch/powerpc/lib/rheap.c 		delta = (char *)block - (char *)info->block;
block              70 arch/powerpc/lib/rheap.c 		blks = (unsigned long)info->block;
block              71 arch/powerpc/lib/rheap.c 		blke = (unsigned long)(info->block + info->max_blocks);
block              73 arch/powerpc/lib/rheap.c 		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
block              82 arch/powerpc/lib/rheap.c 			kfree(info->block);
block              85 arch/powerpc/lib/rheap.c 	info->block = block;
block              91 arch/powerpc/lib/rheap.c 	blk = block + info->max_blocks - new_blocks;
block             268 arch/powerpc/lib/rheap.c 	info->block = NULL;
block             288 arch/powerpc/lib/rheap.c 		kfree(info->block);
block             301 arch/powerpc/lib/rheap.c 	     rh_block_t * block)
block             313 arch/powerpc/lib/rheap.c 	info->block = block;
block             323 arch/powerpc/lib/rheap.c 	for (i = 0, blk = block; i < max_blocks; i++, blk++)
block              87 arch/powerpc/mm/kasan/kasan_init_32.c 	void *block = NULL;
block              94 arch/powerpc/mm/kasan/kasan_init_32.c 		block = memblock_alloc(k_end - k_start, PAGE_SIZE);
block              98 arch/powerpc/mm/kasan/kasan_init_32.c 		void *va = block ? block + k_cur - k_start : kasan_get_one_page();
block              50 arch/s390/boot/mem_detect.c 	struct mem_detect_block *block;
block              53 arch/s390/boot/mem_detect.c 		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
block              54 arch/s390/boot/mem_detect.c 		if (block->end == start) {
block              55 arch/s390/boot/mem_detect.c 			block->end = end;
block              60 arch/s390/boot/mem_detect.c 	block = __get_mem_detect_block_ptr(mem_detect.count);
block              61 arch/s390/boot/mem_detect.c 	block->start = start;
block              62 arch/s390/boot/mem_detect.c 	block->end = end;
block             551 arch/s390/crypto/aes_s390.c 		u8 block[16];
block             562 arch/s390/crypto/aes_s390.c 	memset(pcc_param.block, 0, sizeof(pcc_param.block));
block             442 arch/s390/crypto/paes_s390.c 		u8 block[16];
block             229 arch/s390/kernel/sthyi.c 			  struct diag204_x_phys_block *block,
block             234 arch/s390/kernel/sthyi.c 	for (i = 0; i < block->hdr.cpus; i++) {
block             235 arch/s390/kernel/sthyi.c 		switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
block             237 arch/s390/kernel/sthyi.c 			if (block->cpus[i].weight == DED_WEIGHT)
block             243 arch/s390/kernel/sthyi.c 			if (block->cpus[i].weight == DED_WEIGHT)
block             257 arch/s390/kernel/sthyi.c 						 struct diag204_x_part_block *block)
block             262 arch/s390/kernel/sthyi.c 	for (i = 0; i < block->hdr.rcpus; i++) {
block             263 arch/s390/kernel/sthyi.c 		if (!(block->cpus[i].cflag & DIAG204_CPU_ONLINE))
block             266 arch/s390/kernel/sthyi.c 		switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
block             269 arch/s390/kernel/sthyi.c 			if (block->cpus[i].cur_weight < DED_WEIGHT)
block             270 arch/s390/kernel/sthyi.c 				weight_cp |= block->cpus[i].cur_weight;
block             274 arch/s390/kernel/sthyi.c 			if (block->cpus[i].cur_weight < DED_WEIGHT)
block             275 arch/s390/kernel/sthyi.c 				weight_ifl |= block->cpus[i].cur_weight;
block             284 arch/s390/kernel/sthyi.c 		capped |= block->cpus[i].cflag & DIAG204_CPU_CAPPED;
block             285 arch/s390/kernel/sthyi.c 		cpu_inf->lpar_cap |= block->cpus[i].cpu_type_cap;
block             286 arch/s390/kernel/sthyi.c 		cpu_inf->lpar_grp_cap |= block->cpus[i].group_cpu_type_cap;
block             288 arch/s390/kernel/sthyi.c 		if (block->cpus[i].weight == DED_WEIGHT)
block             300 arch/s390/kernel/sthyi.c 	return (struct diag204_x_part_block *)&block->cpus[i];
block             749 arch/s390/mm/fault.c 			goto block;
block             764 arch/s390/mm/fault.c block:
block              54 arch/sparc/crypto/md5_glue.c 		memcpy((u8 *)sctx->block + partial, data, done);
block              55 arch/sparc/crypto/md5_glue.c 		md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1);
block              64 arch/sparc/crypto/md5_glue.c 	memcpy(sctx->block, data + done, len - done);
block              76 arch/sparc/crypto/md5_glue.c 		memcpy((u8 *)sctx->block + partial, data, len);
block             101 arch/sparc/crypto/md5_glue.c 		memcpy((u8 *)sctx->block + index, padding, padlen);
block             164 arch/sparc/include/asm/viking.h static inline void viking_get_dcache_ptag(int set, int block,
block             167 arch/sparc/include/asm/viking.h 	unsigned long ptag = ((set & 0x7f) << 5) | ((block & 0x3) << 26) |
block            2628 arch/sparc/mm/init_64.c 			void *block = vmemmap_alloc_block(PMD_SIZE, node);
block            2630 arch/sparc/mm/init_64.c 			if (!block)
block            2633 arch/sparc/mm/init_64.c 			pmd_val(*pmd) = pte_base | __pa(block);
block              36 arch/x86/include/asm/amd_nb.h 	unsigned int	 block;			/* Number within bank */
block             354 arch/x86/kernel/cpu/mce/amd.c 		       b->bank, b->block, b->address, hi, lo);
block             369 arch/x86/kernel/cpu/mce/amd.c 		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
block             478 arch/x86/kernel/cpu/mce/amd.c static u32 smca_get_block_address(unsigned int bank, unsigned int block,
block             481 arch/x86/kernel/cpu/mce/amd.c 	if (!block)
block             487 arch/x86/kernel/cpu/mce/amd.c 	return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
block             491 arch/x86/kernel/cpu/mce/amd.c 			     unsigned int bank, unsigned int block,
block             496 arch/x86/kernel/cpu/mce/amd.c 	if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
block             500 arch/x86/kernel/cpu/mce/amd.c 		return smca_get_block_address(bank, block, cpu);
block             503 arch/x86/kernel/cpu/mce/amd.c 	switch (block) {
block             519 arch/x86/kernel/cpu/mce/amd.c prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
block             527 arch/x86/kernel/cpu/mce/amd.c 	if (!block)
block             533 arch/x86/kernel/cpu/mce/amd.c 	b.block			= block;
block             628 arch/x86/kernel/cpu/mce/amd.c 	unsigned int bank, block, cpu = smp_processor_id();
block             639 arch/x86/kernel/cpu/mce/amd.c 		for (block = 0; block < NR_BLOCKS; ++block) {
block             640 arch/x86/kernel/cpu/mce/amd.c 			address = get_block_address(address, low, high, bank, block, cpu);
block             654 arch/x86/kernel/cpu/mce/amd.c 			offset = prepare_threshold_block(bank, block, address, offset, high);
block             987 arch/x86/kernel/cpu/mce/amd.c static void log_and_reset_block(struct threshold_block *block)
block             992 arch/x86/kernel/cpu/mce/amd.c 	if (!block)
block             995 arch/x86/kernel/cpu/mce/amd.c 	if (rdmsr_safe(block->address, &low, &high))
block            1002 arch/x86/kernel/cpu/mce/amd.c 	log_error_thresholding(block->bank, ((u64)high << 32) | low);
block            1006 arch/x86/kernel/cpu/mce/amd.c 	tr.b = block;
block            1016 arch/x86/kernel/cpu/mce/amd.c 	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
block            1032 arch/x86/kernel/cpu/mce/amd.c 		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
block            1033 arch/x86/kernel/cpu/mce/amd.c 			log_and_reset_block(block);
block            1188 arch/x86/kernel/cpu/mce/amd.c 		if (b->block < ARRAY_SIZE(smca_umc_block_names))
block            1189 arch/x86/kernel/cpu/mce/amd.c 			return smca_umc_block_names[b->block];
block            1203 arch/x86/kernel/cpu/mce/amd.c 				     unsigned int bank, unsigned int block,
block            1210 arch/x86/kernel/cpu/mce/amd.c 	if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
block            1217 arch/x86/kernel/cpu/mce/amd.c 		if (block)
block            1231 arch/x86/kernel/cpu/mce/amd.c 	b->block		= block;
block            1257 arch/x86/kernel/cpu/mce/amd.c 	address = get_block_address(address, low, high, bank, ++block, cpu);
block            1261 arch/x86/kernel/cpu/mce/amd.c 	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
block             641 arch/x86/kernel/cpu/mtrr/generic.c 	int block = -1, range;
block             645 arch/x86/kernel/cpu/mtrr/generic.c 	while (fixed_range_blocks[++block].ranges) {
block             646 arch/x86/kernel/cpu/mtrr/generic.c 		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
block             647 arch/x86/kernel/cpu/mtrr/generic.c 			set_fixed_range(fixed_range_blocks[block].base_msr + range,
block             198 arch/x86/kernel/step.c static void enable_step(struct task_struct *child, bool block)
block             207 arch/x86/kernel/step.c 	if (enable_single_step(child) && block)
block              75 arch/x86/platform/intel-mid/device_libs/platform_msic.c void *msic_generic_platform_data(void *info, enum intel_msic_block block)
block              79 arch/x86/platform/intel-mid/device_libs/platform_msic.c 	BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
block              80 arch/x86/platform/intel-mid/device_libs/platform_msic.c 	msic_pdata.irq[block] = entry->irq;
block              13 arch/x86/platform/intel-mid/device_libs/platform_msic.h void *msic_generic_platform_data(void *info, enum intel_msic_block block);
block            1627 block/genhd.c  	int			block;		/* event blocking depth */
block            1700 block/genhd.c  	cancel = !ev->block++;
block            1717 block/genhd.c  	if (WARN_ON_ONCE(ev->block <= 0))
block            1720 block/genhd.c  	if (--ev->block)
block            1771 block/genhd.c  	if (!ev->block)
block            1865 block/genhd.c  	if (!ev->block && intv)
block            2034 block/genhd.c  	ev->block = 1;
block            2078 block/genhd.c  	WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
block             310 block/partitions/acorn.c 				   unsigned long block)
block             313 block/partitions/acorn.c 	unsigned char *data = read_part_sector(state, block, &sect);
block             657 block/partitions/ldm.c static u64 ldm_get_vnum (const u8 *block)
block             662 block/partitions/ldm.c 	BUG_ON (!block);
block             664 block/partitions/ldm.c 	length = *block++;
block             668 block/partitions/ldm.c 			tmp = (tmp << 8) | *block++;
block             692 block/partitions/ldm.c static int ldm_get_vstr (const u8 *block, u8 *buffer, int buflen)
block             696 block/partitions/ldm.c 	BUG_ON (!block || !buffer);
block             698 block/partitions/ldm.c 	length = block[0];
block             703 block/partitions/ldm.c 	memcpy (buffer, block + 1, length);
block             157 crypto/cast6_generic.c static inline void Q(u32 *block, u8 *Kr, u32 *Km)
block             160 crypto/cast6_generic.c 	block[2] ^= F1(block[3], Kr[0], Km[0]);
block             161 crypto/cast6_generic.c 	block[1] ^= F2(block[2], Kr[1], Km[1]);
block             162 crypto/cast6_generic.c 	block[0] ^= F3(block[1], Kr[2], Km[2]);
block             163 crypto/cast6_generic.c 	block[3] ^= F1(block[0], Kr[3], Km[3]);
block             167 crypto/cast6_generic.c static inline void QBAR(u32 *block, u8 *Kr, u32 *Km)
block             170 crypto/cast6_generic.c 	block[3] ^= F1(block[0], Kr[3], Km[3]);
block             171 crypto/cast6_generic.c 	block[0] ^= F3(block[1], Kr[2], Km[2]);
block             172 crypto/cast6_generic.c 	block[1] ^= F2(block[2], Kr[1], Km[1]);
block             173 crypto/cast6_generic.c 	block[2] ^= F1(block[3], Kr[0], Km[0]);
block             180 crypto/cast6_generic.c 	u32 block[4];
block             184 crypto/cast6_generic.c 	block[0] = be32_to_cpu(src[0]);
block             185 crypto/cast6_generic.c 	block[1] = be32_to_cpu(src[1]);
block             186 crypto/cast6_generic.c 	block[2] = be32_to_cpu(src[2]);
block             187 crypto/cast6_generic.c 	block[3] = be32_to_cpu(src[3]);
block             189 crypto/cast6_generic.c 	Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km);
block             190 crypto/cast6_generic.c 	Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km);
block             191 crypto/cast6_generic.c 	Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km);
block             192 crypto/cast6_generic.c 	Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km);
block             193 crypto/cast6_generic.c 	Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km);
block             194 crypto/cast6_generic.c 	Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km);
block             195 crypto/cast6_generic.c 	Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km);
block             196 crypto/cast6_generic.c 	Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km);
block             197 crypto/cast6_generic.c 	Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km);
block             198 crypto/cast6_generic.c 	Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km);
block             199 crypto/cast6_generic.c 	Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km);
block             200 crypto/cast6_generic.c 	Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km);
block             202 crypto/cast6_generic.c 	dst[0] = cpu_to_be32(block[0]);
block             203 crypto/cast6_generic.c 	dst[1] = cpu_to_be32(block[1]);
block             204 crypto/cast6_generic.c 	dst[2] = cpu_to_be32(block[2]);
block             205 crypto/cast6_generic.c 	dst[3] = cpu_to_be32(block[3]);
block             218 crypto/cast6_generic.c 	u32 block[4];
block             222 crypto/cast6_generic.c 	block[0] = be32_to_cpu(src[0]);
block             223 crypto/cast6_generic.c 	block[1] = be32_to_cpu(src[1]);
block             224 crypto/cast6_generic.c 	block[2] = be32_to_cpu(src[2]);
block             225 crypto/cast6_generic.c 	block[3] = be32_to_cpu(src[3]);
block             227 crypto/cast6_generic.c 	Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km);
block             228 crypto/cast6_generic.c 	Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km);
block             229 crypto/cast6_generic.c 	Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km);
block             230 crypto/cast6_generic.c 	Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km);
block             231 crypto/cast6_generic.c 	Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km);
block             232 crypto/cast6_generic.c 	Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km);
block             233 crypto/cast6_generic.c 	Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km);
block             234 crypto/cast6_generic.c 	Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km);
block             235 crypto/cast6_generic.c 	Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km);
block             236 crypto/cast6_generic.c 	Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km);
block             237 crypto/cast6_generic.c 	Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km);
block             238 crypto/cast6_generic.c 	Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km);
block             240 crypto/cast6_generic.c 	dst[0] = cpu_to_be32(block[0]);
block             241 crypto/cast6_generic.c 	dst[1] = cpu_to_be32(block[1]);
block             242 crypto/cast6_generic.c 	dst[2] = cpu_to_be32(block[2]);
block             243 crypto/cast6_generic.c 	dst[3] = cpu_to_be32(block[3]);
block              70 crypto/ccm.c   static int set_msg_len(u8 *block, unsigned int msglen, int csize)
block              74 crypto/ccm.c   	memset(block, 0, csize);
block              75 crypto/ccm.c   	block += csize;
block              83 crypto/ccm.c   	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
block             127 crypto/keywrap.c 	struct crypto_kw_block block;
block             141 crypto/keywrap.c 	memcpy(&block.A, req->iv, SEMIBSIZE);
block             159 crypto/keywrap.c 			scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
block             163 crypto/keywrap.c 			block.A ^= cpu_to_be64(t);
block             166 crypto/keywrap.c 			crypto_cipher_decrypt_one(cipher, (u8 *)&block,
block             167 crypto/keywrap.c 						  (u8 *)&block);
block             172 crypto/keywrap.c 			scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
block             184 crypto/keywrap.c 	if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
block             187 crypto/keywrap.c 	memzero_explicit(&block, sizeof(struct crypto_kw_block));
block             196 crypto/keywrap.c 	struct crypto_kw_block block;
block             214 crypto/keywrap.c 	block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
block             233 crypto/keywrap.c 			scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
block             237 crypto/keywrap.c 			crypto_cipher_encrypt_one(cipher, (u8 *)&block,
block             238 crypto/keywrap.c 						  (u8 *)&block);
block             240 crypto/keywrap.c 			block.A ^= cpu_to_be64(t);
block             244 crypto/keywrap.c 			scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
block             256 crypto/keywrap.c 	memcpy(req->iv, &block.A, SEMIBSIZE);
block             258 crypto/keywrap.c 	memzero_explicit(&block, sizeof(struct crypto_kw_block));
block              38 crypto/md4.c   	u32 block[MD4_BLOCK_WORDS];
block             135 crypto/md4.c   	le32_to_cpu_array(ctx->block, ARRAY_SIZE(ctx->block));
block             136 crypto/md4.c   	md4_transform(ctx->hash, ctx->block);
block             155 crypto/md4.c   	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
block             160 crypto/md4.c   		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
block             165 crypto/md4.c   	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
block             172 crypto/md4.c   	while (len >= sizeof(mctx->block)) {
block             173 crypto/md4.c   		memcpy(mctx->block, data, sizeof(mctx->block));
block             175 crypto/md4.c   		data += sizeof(mctx->block);
block             176 crypto/md4.c   		len -= sizeof(mctx->block);
block             179 crypto/md4.c   	memcpy(mctx->block, data, len);
block             188 crypto/md4.c   	char *p = (char *)mctx->block + offset;
block             195 crypto/md4.c   		p = (char *)mctx->block;
block             200 crypto/md4.c   	mctx->block[14] = mctx->byte_count << 3;
block             201 crypto/md4.c   	mctx->block[15] = mctx->byte_count >> 29;
block             202 crypto/md4.c   	le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
block             204 crypto/md4.c   	md4_transform(mctx->hash, mctx->block);
block             128 crypto/md5.c   	le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
block             129 crypto/md5.c   	md5_transform(ctx->hash, ctx->block);
block             148 crypto/md5.c   	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
block             153 crypto/md5.c   		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
block             158 crypto/md5.c   	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
block             165 crypto/md5.c   	while (len >= sizeof(mctx->block)) {
block             166 crypto/md5.c   		memcpy(mctx->block, data, sizeof(mctx->block));
block             168 crypto/md5.c   		data += sizeof(mctx->block);
block             169 crypto/md5.c   		len -= sizeof(mctx->block);
block             172 crypto/md5.c   	memcpy(mctx->block, data, len);
block             181 crypto/md5.c   	char *p = (char *)mctx->block + offset;
block             188 crypto/md5.c   		p = (char *)mctx->block;
block             193 crypto/md5.c   	mctx->block[14] = mctx->byte_count << 3;
block             194 crypto/md5.c   	mctx->block[15] = mctx->byte_count >> 29;
block             195 crypto/md5.c   	le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
block             197 crypto/md5.c   	md5_transform(mctx->hash, mctx->block);
block             785 crypto/wp512.c 	u64 block[8];    /* mu(buffer) */
block             791 crypto/wp512.c 		block[i] = be64_to_cpu(buffer[i]);
block             793 crypto/wp512.c 	state[0] = block[0] ^ (K[0] = wctx->hash[0]);
block             794 crypto/wp512.c 	state[1] = block[1] ^ (K[1] = wctx->hash[1]);
block             795 crypto/wp512.c 	state[2] = block[2] ^ (K[2] = wctx->hash[2]);
block             796 crypto/wp512.c 	state[3] = block[3] ^ (K[3] = wctx->hash[3]);
block             797 crypto/wp512.c 	state[4] = block[4] ^ (K[4] = wctx->hash[4]);
block             798 crypto/wp512.c 	state[5] = block[5] ^ (K[5] = wctx->hash[5]);
block             799 crypto/wp512.c 	state[6] = block[6] ^ (K[6] = wctx->hash[6]);
block             800 crypto/wp512.c 	state[7] = block[7] ^ (K[7] = wctx->hash[7]);
block             978 crypto/wp512.c 	wctx->hash[0] ^= state[0] ^ block[0];
block             979 crypto/wp512.c 	wctx->hash[1] ^= state[1] ^ block[1];
block             980 crypto/wp512.c 	wctx->hash[2] ^= state[2] ^ block[2];
block             981 crypto/wp512.c 	wctx->hash[3] ^= state[3] ^ block[3];
block             982 crypto/wp512.c 	wctx->hash[4] ^= state[4] ^ block[4];
block             983 crypto/wp512.c 	wctx->hash[5] ^= state[5] ^ block[5];
block             984 crypto/wp512.c 	wctx->hash[6] ^= state[6] ^ block[6];
block             985 crypto/wp512.c 	wctx->hash[7] ^= state[7] ^ block[7];
block             732 drivers/acpi/acpica/dbdisply.c 	u32 block = 0;
block             743 drivers/acpi/acpica/dbdisply.c 	block = 0;
block             767 drivers/acpi/acpica/dbdisply.c 			     block, gpe_block, gpe_block->node, buffer,
block             909 drivers/acpi/acpica/dbdisply.c 			block++;
block             191 drivers/acpi/arm64/gtdt.c static int __init gtdt_parse_timer_block(struct acpi_gtdt_timer_block *block,
block             198 drivers/acpi/arm64/gtdt.c 	if (!block->timer_count) {
block             203 drivers/acpi/arm64/gtdt.c 	if (block->timer_count > ARCH_TIMER_MEM_MAX_FRAMES) {
block             205 drivers/acpi/arm64/gtdt.c 		       block->timer_count);
block             209 drivers/acpi/arm64/gtdt.c 	timer_mem->cntctlbase = (phys_addr_t)block->block_address;
block             217 drivers/acpi/arm64/gtdt.c 	gtdt_frame = (void *)block + block->timer_offset;
block             218 drivers/acpi/arm64/gtdt.c 	if (gtdt_frame + block->timer_count != (void *)block + block->header.length)
block             224 drivers/acpi/arm64/gtdt.c 	for (i = 0; i < block->timer_count; i++, gtdt_frame++) {
block             686 drivers/ata/libata-core.c 	u64 block = 0;
block             690 drivers/ata/libata-core.c 			block |= (u64)tf->hob_lbah << 40;
block             691 drivers/ata/libata-core.c 			block |= (u64)tf->hob_lbam << 32;
block             692 drivers/ata/libata-core.c 			block |= (u64)tf->hob_lbal << 24;
block             694 drivers/ata/libata-core.c 			block |= (tf->device & 0xf) << 24;
block             696 drivers/ata/libata-core.c 		block |= tf->lbah << 16;
block             697 drivers/ata/libata-core.c 		block |= tf->lbam << 8;
block             698 drivers/ata/libata-core.c 		block |= tf->lbal;
block             712 drivers/ata/libata-core.c 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
block             715 drivers/ata/libata-core.c 	return block;
block             740 drivers/ata/libata-core.c 		    u64 block, u32 n_block, unsigned int tf_flags,
block             748 drivers/ata/libata-core.c 		if (!lba_48_ok(block, n_block))
block             763 drivers/ata/libata-core.c 		tf->hob_lbah = (block >> 40) & 0xff;
block             764 drivers/ata/libata-core.c 		tf->hob_lbam = (block >> 32) & 0xff;
block             765 drivers/ata/libata-core.c 		tf->hob_lbal = (block >> 24) & 0xff;
block             766 drivers/ata/libata-core.c 		tf->lbah = (block >> 16) & 0xff;
block             767 drivers/ata/libata-core.c 		tf->lbam = (block >> 8) & 0xff;
block             768 drivers/ata/libata-core.c 		tf->lbal = block & 0xff;
block             782 drivers/ata/libata-core.c 		if (lba_28_ok(block, n_block)) {
block             784 drivers/ata/libata-core.c 			tf->device |= (block >> 24) & 0xf;
block             785 drivers/ata/libata-core.c 		} else if (lba_48_ok(block, n_block)) {
block             794 drivers/ata/libata-core.c 			tf->hob_lbah = (block >> 40) & 0xff;
block             795 drivers/ata/libata-core.c 			tf->hob_lbam = (block >> 32) & 0xff;
block             796 drivers/ata/libata-core.c 			tf->hob_lbal = (block >> 24) & 0xff;
block             806 drivers/ata/libata-core.c 		tf->lbah = (block >> 16) & 0xff;
block             807 drivers/ata/libata-core.c 		tf->lbam = (block >> 8) & 0xff;
block             808 drivers/ata/libata-core.c 		tf->lbal = block & 0xff;
block             816 drivers/ata/libata-core.c 		if (!lba_28_ok(block, n_block))
block             823 drivers/ata/libata-core.c 		track = (u32)block / dev->sectors;
block             826 drivers/ata/libata-core.c 		sect  = (u32)block % dev->sectors + 1;
block             829 drivers/ata/libata-core.c 			(u32)block, track, cyl, head, sect);
block            1173 drivers/ata/libata-scsi.c 	u64 block;
block            1202 drivers/ata/libata-scsi.c 	block = ata_tf_read_block(&qc->result_tf, dev);
block            1203 drivers/ata/libata-scsi.c 	if (block == U64_MAX)
block            1206 drivers/ata/libata-scsi.c 	scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
block            1680 drivers/ata/libata-scsi.c 	u64 block;
block            1692 drivers/ata/libata-scsi.c 		scsi_10_lba_len(cdb, &block, &n_block);
block            1698 drivers/ata/libata-scsi.c 		scsi_16_lba_len(cdb, &block, &n_block);
block            1706 drivers/ata/libata-scsi.c 	if (block >= dev_sectors)
block            1708 drivers/ata/libata-scsi.c 	if ((block + n_block) > dev_sectors)
block            1714 drivers/ata/libata-scsi.c 		if (lba_28_ok(block, n_block)) {
block            1717 drivers/ata/libata-scsi.c 			tf->device |= (block >> 24) & 0xf;
block            1718 drivers/ata/libata-scsi.c 		} else if (lba_48_ok(block, n_block)) {
block            1728 drivers/ata/libata-scsi.c 			tf->hob_lbah = (block >> 40) & 0xff;
block            1729 drivers/ata/libata-scsi.c 			tf->hob_lbam = (block >> 32) & 0xff;
block            1730 drivers/ata/libata-scsi.c 			tf->hob_lbal = (block >> 24) & 0xff;
block            1737 drivers/ata/libata-scsi.c 		tf->lbah = (block >> 16) & 0xff;
block            1738 drivers/ata/libata-scsi.c 		tf->lbam = (block >> 8) & 0xff;
block            1739 drivers/ata/libata-scsi.c 		tf->lbal = block & 0xff;
block            1746 drivers/ata/libata-scsi.c 		if (!lba_28_ok(block, n_block))
block            1750 drivers/ata/libata-scsi.c 		track = (u32)block / dev->sectors;
block            1753 drivers/ata/libata-scsi.c 		sect  = (u32)block % dev->sectors + 1;
block            1756 drivers/ata/libata-scsi.c 			(u32)block, track, cyl, head, sect);
block            1829 drivers/ata/libata-scsi.c 	u64 block;
block            1845 drivers/ata/libata-scsi.c 		scsi_10_lba_len(cdb, &block, &n_block);
block            1857 drivers/ata/libata-scsi.c 		scsi_6_lba_len(cdb, &block, &n_block);
block            1873 drivers/ata/libata-scsi.c 		scsi_16_lba_len(cdb, &block, &n_block);
block            1899 drivers/ata/libata-scsi.c 	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
block            3457 drivers/ata/libata-scsi.c 	u64 block;
block            3481 drivers/ata/libata-scsi.c 	scsi_16_lba_len(cdb, &block, &n_block);
block            3509 drivers/ata/libata-scsi.c 	size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
block            3701 drivers/ata/libata-scsi.c 	u64 block;
block            3710 drivers/ata/libata-scsi.c 	scsi_16_lba_len(cdb, &block, &n_block);
block            3751 drivers/ata/libata-scsi.c 	tf->lbah = (block >> 16) & 0xff;
block            3752 drivers/ata/libata-scsi.c 	tf->lbam = (block >> 8) & 0xff;
block            3753 drivers/ata/libata-scsi.c 	tf->lbal = block & 0xff;
block            3754 drivers/ata/libata-scsi.c 	tf->hob_lbah = (block >> 40) & 0xff;
block            3755 drivers/ata/libata-scsi.c 	tf->hob_lbam = (block >> 32) & 0xff;
block            3756 drivers/ata/libata-scsi.c 	tf->hob_lbal = (block >> 24) & 0xff;
block            3784 drivers/ata/libata-scsi.c 	u64 block;
block            3800 drivers/ata/libata-scsi.c 	scsi_16_lba_len(cdb, &block, &n_block);
block            3813 drivers/ata/libata-scsi.c 		block = 0;
block            3814 drivers/ata/libata-scsi.c 	} else if (block >= dev->n_sectors) {
block            3835 drivers/ata/libata-scsi.c 	tf->lbah = (block >> 16) & 0xff;
block            3836 drivers/ata/libata-scsi.c 	tf->lbam = (block >> 8) & 0xff;
block            3837 drivers/ata/libata-scsi.c 	tf->lbal = block & 0xff;
block            3838 drivers/ata/libata-scsi.c 	tf->hob_lbah = (block >> 40) & 0xff;
block            3839 drivers/ata/libata-scsi.c 	tf->hob_lbam = (block >> 32) & 0xff;
block            3840 drivers/ata/libata-scsi.c 	tf->hob_lbal = (block >> 24) & 0xff;
block              45 drivers/ata/libata.h 			   u64 block, u32 n_block, unsigned int tf_flags,
block             914 drivers/base/power/wakeup.c bool pm_get_wakeup_count(unsigned int *count, bool block)
block             918 drivers/base/power/wakeup.c 	if (block) {
block             243 drivers/base/regmap/internal.h int regcache_sync_block(struct regmap *map, void *block,
block              23 drivers/base/regmap/regcache-rbtree.c 	void *block;
block              51 drivers/base/regmap/regcache-rbtree.c 	return regcache_get_val(map, rbnode->block, idx);
block              59 drivers/base/regmap/regcache-rbtree.c 	regcache_set_val(map, rbnode->block, idx, val);
block             231 drivers/base/regmap/regcache-rbtree.c 		kfree(rbtree_node->block);
block             278 drivers/base/regmap/regcache-rbtree.c 	blk = krealloc(rbnode->block,
block             308 drivers/base/regmap/regcache-rbtree.c 	rbnode->block = blk;
block             349 drivers/base/regmap/regcache-rbtree.c 	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
block             351 drivers/base/regmap/regcache-rbtree.c 	if (!rbnode->block)
block             363 drivers/base/regmap/regcache-rbtree.c 	kfree(rbnode->block);
block             496 drivers/base/regmap/regcache-rbtree.c 		ret = regcache_sync_block(map, rbnode->block,
block             668 drivers/base/regmap/regcache.c static int regcache_sync_block_single(struct regmap *map, void *block,
block             683 drivers/base/regmap/regcache.c 		val = regcache_get_val(map, block, i);
block             732 drivers/base/regmap/regcache.c static int regcache_sync_block_raw(struct regmap *map, void *block,
block             755 drivers/base/regmap/regcache.c 		val = regcache_get_val(map, block, i);
block             765 drivers/base/regmap/regcache.c 			data = regcache_get_val_addr(map, block, i);
block             774 drivers/base/regmap/regcache.c int regcache_sync_block(struct regmap *map, void *block,
block             780 drivers/base/regmap/regcache.c 		return regcache_sync_block_raw(map, block, cache_present,
block             783 drivers/base/regmap/regcache.c 		return regcache_sync_block_single(map, block, cache_present,
block            1462 drivers/block/amiflop.c 	unsigned int cnt, block, track, sector;
block            1471 drivers/block/amiflop.c 		block = blk_rq_pos(rq) + cnt;
block            1472 drivers/block/amiflop.c 		track = block / (floppy->dtype->sects * floppy->type->sect_mult);
block            1473 drivers/block/amiflop.c 		sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
block            1458 drivers/block/ataflop.c 	int block = ReqBlock + ReqCnt;
block            1460 drivers/block/ataflop.c 	ReqTrack = block / UDT->spt;
block            1461 drivers/block/ataflop.c 	ReqSector = block - ReqTrack * UDT->spt + 1;
block            2240 drivers/block/floppy.c 	int block;
block            2255 drivers/block/floppy.c 		block = current_count_sectors + blk_rq_pos(req);
block            2256 drivers/block/floppy.c 		INFBOUND(DRS->maxblock, block);
block            2257 drivers/block/floppy.c 		if (block > _floppy->sect)
block             340 drivers/block/paride/pd.c static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
block             345 drivers/block/paride/pd.c 		s = block & 255;
block             346 drivers/block/paride/pd.c 		c0 = (block >>= 8) & 255;
block             347 drivers/block/paride/pd.c 		c1 = (block >>= 8) & 255;
block             348 drivers/block/paride/pd.c 		h = ((block >>= 8) & 15) + 0x40;
block             350 drivers/block/paride/pd.c 		s = (block % disk->sectors) + 1;
block             351 drivers/block/paride/pd.c 		h = (block /= disk->sectors) % disk->heads;
block             352 drivers/block/paride/pd.c 		c0 = (block /= disk->heads) % 256;
block             353 drivers/block/paride/pd.c 		c1 = (block >>= 8);
block             570 drivers/cdrom/gdrom.c 	int block, block_cnt;
block             581 drivers/cdrom/gdrom.c 	block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
block             587 drivers/cdrom/gdrom.c 	read_command->cmd[2] = (block >> 16) & 0xFF;
block             588 drivers/cdrom/gdrom.c 	read_command->cmd[3] = (block >> 8) & 0xFF;
block             589 drivers/cdrom/gdrom.c 	read_command->cmd[4] = block & 0xFF;
block             113 drivers/char/hw_random/n2-drv.c 	int block = 0, busy = 0;
block             129 drivers/char/hw_random/n2-drv.c 			if (++block >= N2RNG_BLOCK_LIMIT)
block             179 drivers/char/hw_random/n2-drv.c 	int block = 0, hcheck = 0;
block             187 drivers/char/hw_random/n2-drv.c 			if (++block >= N2RNG_BLOCK_LIMIT)
block             226 drivers/char/hw_random/n2-drv.c 	int block = 0;
block             236 drivers/char/hw_random/n2-drv.c 			if (++block >= N2RNG_BLOCK_LIMIT)
block             255 drivers/char/hw_random/n2-drv.c 	int block = 0, busy = 0;
block             264 drivers/char/hw_random/n2-drv.c 			if (++block >= N2RNG_BLOCK_LIMIT)
block             147 drivers/char/ipmi/ipmb_dev_int.c 	data.block[0] = msg_len;
block             148 drivers/char/ipmi/ipmb_dev_int.c 	memcpy(&data.block[1], msg + SMBUS_MSG_IDX_OFFSET, msg_len);
block            1005 drivers/char/random.c 		__u8	block[CHACHA_BLOCK_SIZE];
block            1014 drivers/char/random.c 		_extract_crng(&primary_crng, buf.block);
block            1015 drivers/char/random.c 		_crng_backtrack_protect(&primary_crng, buf.block,
block            1401 drivers/crypto/ccree/cc_aead.c static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
block            1405 drivers/crypto/ccree/cc_aead.c 	memset(block, 0, csize);
block            1406 drivers/crypto/ccree/cc_aead.c 	block += csize;
block            1414 drivers/crypto/ccree/cc_aead.c 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
block            2725 drivers/crypto/chelsio/chcr_algo.c static int set_msg_len(u8 *block, unsigned int msglen, int csize)
block            2729 drivers/crypto/chelsio/chcr_algo.c 	memset(block, 0, csize);
block            2730 drivers/crypto/chelsio/chcr_algo.c 	block += csize;
block            2738 drivers/crypto/chelsio/chcr_algo.c 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
block             880 drivers/crypto/marvell/hash.c 				    &out_state->byte_count, out_state->block);
block             888 drivers/crypto/marvell/hash.c 				    in_state->block);
block             104 drivers/crypto/nx/nx-aes-ccm.c static int set_msg_len(u8 *block, unsigned int msglen, int csize)
block             108 drivers/crypto/nx/nx-aes-ccm.c 	memset(block, 0, csize);
block             109 drivers/crypto/nx/nx-aes-ccm.c 	block += csize;
block             117 drivers/crypto/nx/nx-aes-ccm.c 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
block             204 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	u32 ivsize, block, conf_reg = 0;
block             206 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	block = crypto_tfm_alg_blocksize(tfm);
block             209 drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 	if (block == DES_BLOCK_SIZE) {
block              57 drivers/crypto/sunxi-ss/sun4i-ss-hash.c 	memcpy(octx->block, op->buf, op->len);
block              83 drivers/crypto/sunxi-ss/sun4i-ss-hash.c 	memcpy(op->buf, ictx->block, op->len);
block              49 drivers/dma/dw/dw.c 	u32 block;
block              52 drivers/dma/dw/dw.c 		block = dwc->block_size;
block              55 drivers/dma/dw/dw.c 		block = bytes >> width;
block              59 drivers/dma/dw/dw.c 	return block;
block              62 drivers/dma/dw/dw.c static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
block              64 drivers/dma/dw/dw.c 	return DWC_CTLH_BLOCK_TS(block) << width;
block              55 drivers/dma/dw/idma32.c 	u32 block;
block              58 drivers/dma/dw/idma32.c 		block = dwc->block_size;
block              61 drivers/dma/dw/idma32.c 		block = bytes;
block              65 drivers/dma/dw/idma32.c 	return block;
block              68 drivers/dma/dw/idma32.c static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
block              70 drivers/dma/dw/idma32.c 	return IDMA32C_CTLH_BLOCK_TS(block);
block             330 drivers/dma/dw/regs.h 	size_t	(*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);
block             575 drivers/dma/fsl-qdma.c 	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
block             582 drivers/dma/fsl-qdma.c 		block = fsl_qdma->block_base +
block             585 drivers/dma/fsl-qdma.c 			qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
block             597 drivers/dma/fsl-qdma.c 		block = fsl_qdma->block_base +
block             601 drivers/dma/fsl-qdma.c 		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
block             608 drivers/dma/fsl-qdma.c 			    block + FSL_QDMA_BCQIDR(0));
block             616 drivers/dma/fsl-qdma.c 				 void *block,
block             631 drivers/dma/fsl-qdma.c 		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
block             667 drivers/dma/fsl-qdma.c 			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
block             674 drivers/dma/fsl-qdma.c 			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
block             680 drivers/dma/fsl-qdma.c 		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
block             686 drivers/dma/fsl-qdma.c 		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
block             718 drivers/dma/fsl-qdma.c 	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
block             727 drivers/dma/fsl-qdma.c 	block = fsl_qdma->block_base +
block             730 drivers/dma/fsl-qdma.c 	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
block             733 drivers/dma/fsl-qdma.c 		intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
block             739 drivers/dma/fsl-qdma.c 		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
block             745 drivers/dma/fsl-qdma.c 		    block + FSL_QDMA_BCQIDR(0));
block             823 drivers/dma/fsl-qdma.c 	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
block             839 drivers/dma/fsl-qdma.c 		block = fsl_qdma->block_base +
block             842 drivers/dma/fsl-qdma.c 			    block + FSL_QDMA_BCQIDR(0));
block             846 drivers/dma/fsl-qdma.c 		block = fsl_qdma->block_base +
block             859 drivers/dma/fsl-qdma.c 				    block + FSL_QDMA_BCQDPA_SADDR(i));
block             861 drivers/dma/fsl-qdma.c 				    block + FSL_QDMA_BCQEPA_SADDR(i));
block             867 drivers/dma/fsl-qdma.c 			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
block             877 drivers/dma/fsl-qdma.c 			    block + FSL_QDMA_SQCCMR);
block             887 drivers/dma/fsl-qdma.c 			    block + FSL_QDMA_SQEPAR);
block             889 drivers/dma/fsl-qdma.c 			    block + FSL_QDMA_SQDPAR);
block             892 drivers/dma/fsl-qdma.c 			    block + FSL_QDMA_BCQIER(0));
block             895 drivers/dma/fsl-qdma.c 				   block + FSL_QDMA_BSQICR);
block             898 drivers/dma/fsl-qdma.c 				   block + FSL_QDMA_CQIER);
block             905 drivers/dma/fsl-qdma.c 		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
block             906 drivers/dma/fsl-qdma.c 		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
block             943 drivers/dma/fsl-qdma.c 	void __iomem *block = fsl_queue->block_base;
block             945 drivers/dma/fsl-qdma.c 	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
block             961 drivers/dma/fsl-qdma.c 	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
block             963 drivers/dma/fsl-qdma.c 	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
block             129 drivers/dma/ppc4xx/adma.c static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
block             138 drivers/dma/ppc4xx/adma.c 		cdb = block;
block             153 drivers/dma/ppc4xx/adma.c 		cb = block;
block              64 drivers/edac/edac_device.c 	unsigned instance, block, attr;
block             157 drivers/edac/edac_device.c 		for (block = 0; block < nr_blocks; block++) {
block             158 drivers/edac/edac_device.c 			blk = &blk_p[block];
block             161 drivers/edac/edac_device.c 				 "%s%d", edac_block_name, block+offset_value);
block             164 drivers/edac/edac_device.c 				 instance, inst, block, blk, blk->name);
block             174 drivers/edac/edac_device.c 			attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
block             195 drivers/edac/edac_device.c 				attrib->block = blk;	/* up link */
block             562 drivers/edac/edac_device.c 	struct edac_device_block *block = NULL;
block             584 drivers/edac/edac_device.c 		block = instance->blocks + block_nr;
block             585 drivers/edac/edac_device.c 		block->counters.ce_count++;
block             596 drivers/edac/edac_device.c 				block ? block->name : "N/A", msg);
block             604 drivers/edac/edac_device.c 	struct edac_device_block *block = NULL;
block             626 drivers/edac/edac_device.c 		block = instance->blocks + block_nr;
block             627 drivers/edac/edac_device.c 		block->counters.ue_count++;
block             638 drivers/edac/edac_device.c 				block ? block->name : "N/A", msg);
block             643 drivers/edac/edac_device.c 			block ? block->name : "N/A", msg);
block             111 drivers/edac/edac_device.h 	struct edac_device_block *block;
block             416 drivers/edac/edac_device_sysfs.c 	struct edac_device_block *block = to_block(kobj);
block             418 drivers/edac/edac_device_sysfs.c 	return sprintf(data, "%u\n", block->counters.ue_count);
block             424 drivers/edac/edac_device_sysfs.c 	struct edac_device_block *block = to_block(kobj);
block             426 drivers/edac/edac_device_sysfs.c 	return sprintf(data, "%u\n", block->counters.ce_count);
block             432 drivers/edac/edac_device_sysfs.c 	struct edac_device_block *block;
block             437 drivers/edac/edac_device_sysfs.c 	block = to_block(kobj);
block             442 drivers/edac/edac_device_sysfs.c 	kobject_put(&block->instance->ctl->kobj);
block             509 drivers/edac/edac_device_sysfs.c 				struct edac_device_block *block)
block             517 drivers/edac/edac_device_sysfs.c 		 instance->name, instance, block->name, block);
block             519 drivers/edac/edac_device_sysfs.c 		 &block->kobj, &block->kobj.parent);
block             522 drivers/edac/edac_device_sysfs.c 	memset(&block->kobj, 0, sizeof(struct kobject));
block             534 drivers/edac/edac_device_sysfs.c 	err = kobject_init_and_add(&block->kobj, &ktype_block_ctrl,
block             536 drivers/edac/edac_device_sysfs.c 				   "%s", block->name);
block             538 drivers/edac/edac_device_sysfs.c 		edac_dbg(1, "Failed to register instance '%s'\n", block->name);
block             547 drivers/edac/edac_device_sysfs.c 	sysfs_attrib = block->block_attributes;
block             548 drivers/edac/edac_device_sysfs.c 	if (sysfs_attrib && block->nr_attribs) {
block             549 drivers/edac/edac_device_sysfs.c 		for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
block             553 drivers/edac/edac_device_sysfs.c 				 sysfs_attrib, &block->kobj);
block             556 drivers/edac/edac_device_sysfs.c 			err = sysfs_create_file(&block->kobj,
block             562 drivers/edac/edac_device_sysfs.c 	kobject_uevent(&block->kobj, KOBJ_ADD);
block             568 drivers/edac/edac_device_sysfs.c 	kobject_put(&block->kobj);
block             578 drivers/edac/edac_device_sysfs.c 				struct edac_device_block *block)
block             586 drivers/edac/edac_device_sysfs.c 	sysfs_attrib = block->block_attributes;
block             587 drivers/edac/edac_device_sysfs.c 	if (sysfs_attrib && block->nr_attribs) {
block             588 drivers/edac/edac_device_sysfs.c 		for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
block             591 drivers/edac/edac_device_sysfs.c 			sysfs_remove_file(&block->kobj,
block             599 drivers/edac/edac_device_sysfs.c 	kobject_put(&block->kobj);
block              43 drivers/firewire/core-card.c int fw_compute_block_crc(__be32 *block)
block              48 drivers/firewire/core-card.c 	length = (be32_to_cpu(block[0]) >> 16) & 0xff;
block              49 drivers/firewire/core-card.c 	crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
block              50 drivers/firewire/core-card.c 	*block |= cpu_to_be32(crc);
block              67 drivers/firewire/core-device.c static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
block              75 drivers/firewire/core-device.c 	quadlets = min(block[0] >> 16, 256U);
block              79 drivers/firewire/core-device.c 	if (block[1] != 0 || block[2] != 0)
block              83 drivers/firewire/core-device.c 	block += 3;
block              86 drivers/firewire/core-device.c 		c = block[i / 4] >> (24 - 8 * (i % 4));
block             121 drivers/firewire/core.h int fw_compute_block_crc(__be32 *block);
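
fw_compute_block_crc() in the core-card.c entries above takes the block length in quadlets from bits 16-23 of the first quadlet, runs CRC-ITU-T over the quadlets that follow, and ORs the 16-bit result into that first quadlet. Below is a host-endian sketch of the same arithmetic; the local crc16_itu_t() helper stands in for the kernel's crc_itu_t(), the be32 byte-swapping is omitted, and the sample block contents are made up.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* CRC-ITU-T (polynomial 0x1021, MSB first), a stand-in for crc_itu_t(). */
static uint16_t crc16_itu_t(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

/* Sketch of the block-CRC layout: quadlet 0 = length << 16 | crc16(body). */
static void compute_block_crc(uint32_t *block)
{
	unsigned int length = (block[0] >> 16) & 0xff;	/* body length in quadlets */
	uint16_t crc = crc16_itu_t(0, (const uint8_t *)&block[1], length * 4);

	block[0] |= crc;	/* low 16 bits of quadlet 0 hold the CRC */
}

int main(void)
{
	uint32_t block[3] = { 2u << 16, 0x31333934, 0x00000000 };	/* made-up body */

	compute_block_crc(block);
	printf("quadlet 0 = 0x%08x\n", block[0]);
	return 0;
}
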
block              12 drivers/firmware/efi/libstub/arm32-stub.c 	int block;
block              19 drivers/firmware/efi/libstub/arm32-stub.c 	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
block              20 drivers/firmware/efi/libstub/arm32-stub.c 	if (block < 5) {
block             749 drivers/firmware/efi/vars.c 			  bool block, unsigned long size, void *data)
block             771 drivers/firmware/efi/vars.c 	if (!block && ops->set_variable_nonblocking)
block             775 drivers/firmware/efi/vars.c 	if (!block) {
block             134 drivers/gpio/gpio-sch311x.c 	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
block             136 drivers/gpio/gpio-sch311x.c 	if (block->config_regs[offset] == 0) /* GPIO is not available */
block             139 drivers/gpio/gpio-sch311x.c 	if (!request_region(block->runtime_reg + block->config_regs[offset],
block             142 drivers/gpio/gpio-sch311x.c 			block->runtime_reg + block->config_regs[offset]);
block             150 drivers/gpio/gpio-sch311x.c 	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
block             152 drivers/gpio/gpio-sch311x.c 	if (block->config_regs[offset] == 0) /* GPIO is not available */
block             155 drivers/gpio/gpio-sch311x.c 	release_region(block->runtime_reg + block->config_regs[offset], 1);
block             160 drivers/gpio/gpio-sch311x.c 	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
block             163 drivers/gpio/gpio-sch311x.c 	spin_lock(&block->lock);
block             164 drivers/gpio/gpio-sch311x.c 	data = inb(block->runtime_reg + block->data_reg);
block             165 drivers/gpio/gpio-sch311x.c 	spin_unlock(&block->lock);
block             170 drivers/gpio/gpio-sch311x.c static void __sch311x_gpio_set(struct sch311x_gpio_block *block,
block             173 drivers/gpio/gpio-sch311x.c 	u8 data = inb(block->runtime_reg + block->data_reg);
block             178 drivers/gpio/gpio-sch311x.c 	outb(data, block->runtime_reg + block->data_reg);
block             184 drivers/gpio/gpio-sch311x.c 	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
block             186 drivers/gpio/gpio-sch311x.c 	spin_lock(&block->lock);
block             187 drivers/gpio/gpio-sch311x.c 	__sch311x_gpio_set(block, offset, value);
block             188 drivers/gpio/gpio-sch311x.c 	spin_unlock(&block->lock);
block             193 drivers/gpio/gpio-sch311x.c 	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
block             196 drivers/gpio/gpio-sch311x.c 	spin_lock(&block->lock);
block             197 drivers/gpio/gpio-sch311x.c 	data = inb(block->runtime_reg + block->config_regs[offset]);
block             199 drivers/gpio/gpio-sch311x.c 	outb(data, block->runtime_reg + block->config_regs[offset]);
block             200 drivers/gpio/gpio-sch311x.c 	spin_unlock(&block->lock);
block             208 drivers/gpio/gpio-sch311x.c 	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
block             211 drivers/gpio/gpio-sch311x.c 	spin_lock(&block->lock);
block             213 drivers/gpio/gpio-sch311x.c 	data = inb(block->runtime_reg + block->config_regs[offset]);
block             215 drivers/gpio/gpio-sch311x.c 	outb(data, block->runtime_reg + block->config_regs[offset]);
block             216 drivers/gpio/gpio-sch311x.c 	__sch311x_gpio_set(block, offset, value);
block             218 drivers/gpio/gpio-sch311x.c 	spin_unlock(&block->lock);
block             224 drivers/gpio/gpio-sch311x.c 	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
block             227 drivers/gpio/gpio-sch311x.c 	spin_lock(&block->lock);
block             228 drivers/gpio/gpio-sch311x.c 	data = inb(block->runtime_reg + block->config_regs[offset]);
block             229 drivers/gpio/gpio-sch311x.c 	spin_unlock(&block->lock);
block             237 drivers/gpio/gpio-sch311x.c 	struct sch311x_gpio_block *block = gpiochip_get_data(chip);
block             243 drivers/gpio/gpio-sch311x.c 		spin_lock(&block->lock);
block             244 drivers/gpio/gpio-sch311x.c 		data = inb(block->runtime_reg + block->config_regs[offset]);
block             246 drivers/gpio/gpio-sch311x.c 		outb(data, block->runtime_reg + block->config_regs[offset]);
block             247 drivers/gpio/gpio-sch311x.c 		spin_unlock(&block->lock);
block             250 drivers/gpio/gpio-sch311x.c 		spin_lock(&block->lock);
block             251 drivers/gpio/gpio-sch311x.c 		data = inb(block->runtime_reg + block->config_regs[offset]);
block             253 drivers/gpio/gpio-sch311x.c 		outb(data, block->runtime_reg + block->config_regs[offset]);
block             254 drivers/gpio/gpio-sch311x.c 		spin_unlock(&block->lock);
block             266 drivers/gpio/gpio-sch311x.c 	struct sch311x_gpio_block *block;
block             284 drivers/gpio/gpio-sch311x.c 		block = &priv->blocks[i];
block             286 drivers/gpio/gpio-sch311x.c 		spin_lock_init(&block->lock);
block             288 drivers/gpio/gpio-sch311x.c 		block->chip.label = DRV_NAME;
block             289 drivers/gpio/gpio-sch311x.c 		block->chip.owner = THIS_MODULE;
block             290 drivers/gpio/gpio-sch311x.c 		block->chip.request = sch311x_gpio_request;
block             291 drivers/gpio/gpio-sch311x.c 		block->chip.free = sch311x_gpio_free;
block             292 drivers/gpio/gpio-sch311x.c 		block->chip.direction_input = sch311x_gpio_direction_in;
block             293 drivers/gpio/gpio-sch311x.c 		block->chip.direction_output = sch311x_gpio_direction_out;
block             294 drivers/gpio/gpio-sch311x.c 		block->chip.get_direction = sch311x_gpio_get_direction;
block             295 drivers/gpio/gpio-sch311x.c 		block->chip.set_config = sch311x_gpio_set_config;
block             296 drivers/gpio/gpio-sch311x.c 		block->chip.get = sch311x_gpio_get;
block             297 drivers/gpio/gpio-sch311x.c 		block->chip.set = sch311x_gpio_set;
block             298 drivers/gpio/gpio-sch311x.c 		block->chip.ngpio = 8;
block             299 drivers/gpio/gpio-sch311x.c 		block->chip.parent = &pdev->dev;
block             300 drivers/gpio/gpio-sch311x.c 		block->chip.base = sch311x_gpio_blocks[i].base;
block             301 drivers/gpio/gpio-sch311x.c 		block->config_regs = sch311x_gpio_blocks[i].config_regs;
block             302 drivers/gpio/gpio-sch311x.c 		block->data_reg = sch311x_gpio_blocks[i].data_reg;
block             303 drivers/gpio/gpio-sch311x.c 		block->runtime_reg = pdata->runtime_reg;
block             305 drivers/gpio/gpio-sch311x.c 		err = gpiochip_add_data(&block->chip, block);
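
The gpio-sch311x entries above repeat one pattern: take the block spinlock, read a data or configuration register at block->runtime_reg plus a per-offset register, flip a bit, and write the byte back. A standalone sketch of that read-modify-write step follows, with an ordinary variable standing in for the inb()/outb() port access and the locking left out.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the GPIO block's data register; the driver reads and writes
 * it with inb()/outb() at block->runtime_reg + block->data_reg. */
static uint8_t fake_data_reg;

static void gpio_block_set(unsigned int offset, int value)
{
	uint8_t data = fake_data_reg;			/* read current line states */

	if (value)
		data |= (uint8_t)(1u << offset);	/* drive line high */
	else
		data &= (uint8_t)~(1u << offset);	/* drive line low */

	fake_data_reg = data;				/* write the updated byte back */
}

int main(void)
{
	gpio_block_set(3, 1);
	gpio_block_set(0, 1);
	gpio_block_set(3, 0);
	printf("data register = 0x%02x\n", fake_data_reg);	/* prints 0x01 */
	return 0;
}
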
block             264 drivers/gpio/gpio-ts5500.c 	const struct ts5500_dio *block = priv->pinout;
block             265 drivers/gpio/gpio-ts5500.c 	const struct ts5500_dio line = block[offset];
block             316 drivers/gpio/gpio-ts5500.c 	enum ts5500_blocks block = platform_get_device_id(pdev)->driver_data;
block             348 drivers/gpio/gpio-ts5500.c 	switch (block) {
block            1087 drivers/gpu/drm/amd/amdgpu/amdgpu.h #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
block            1088 drivers/gpu/drm/amd/amdgpu/amdgpu.h #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
block             469 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 					  uint32_t block, uint32_t reg)
block             472 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 		  reg, block);
block             489 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 				      uint32_t block,
block             493 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 		  reg, block, v);
block            2296 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 		struct amdgpu_ip_block *block;
block            2299 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			block = &adev->ip_blocks[j];
block            2301 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			block->status.hw = false;
block            2302 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			if (block->version->type != ip_order[i] ||
block            2303 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 				!block->status.valid)
block            2306 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			r = block->version->funcs->hw_init(adev);
block            2307 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
block            2310 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			block->status.hw = true;
block            2332 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 		struct amdgpu_ip_block *block;
block            2335 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			block = &adev->ip_blocks[j];
block            2337 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			if (block->version->type != ip_order[i] ||
block            2338 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 				!block->status.valid ||
block            2339 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 				block->status.hw)
block            2342 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			r = block->version->funcs->hw_init(adev);
block            2343 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
block            2346 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			block->status.hw = true;
block             155 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 		data->head.block = block_id;
block             256 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	if (!amdgpu_ras_is_supported(adev, data.head.block))
block             334 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
block             337 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	obj = &con->objs[head->block];
block             362 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
block             365 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 		obj = &con->objs[head->block];
block             368 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 			WARN_ON(head->block != obj->head.block);
block             375 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 				WARN_ON(i != obj->head.block);
block             391 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	return con->hw_supported & BIT(head->block);
block             399 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	return con->features & BIT(head->block);
block             432 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 		con->features |= BIT(head->block);
block             435 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 			con->features &= ~BIT(head->block);
block             456 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 			.block_id =  amdgpu_ras_block_to_ta(head->block),
block             461 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 			.block_id =  amdgpu_ras_block_to_ta(head->block),
block             476 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 				ras_block_str(head->block),
block             516 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 						ras_block_str(head->block));
block             565 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 			.block = i,
block             597 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	switch (info->head.block) {
block             627 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 			 obj->err_data.ce_count, ras_block_str(info->head.block));
block             630 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 			 obj->err_data.ue_count, ras_block_str(info->head.block));
block             641 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
block             652 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	switch (info->head.block) {
block             665 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 			 ras_block_str(info->head.block));
block             671 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 				ras_block_str(info->head.block),
block            1388 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 		unsigned int block)
block            1506 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
block             303 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h 	enum amdgpu_ras_block block;
block             475 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h 		unsigned int block)
block             479 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
block             481 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h 	return ras && (ras->supported & (1 << block));
block             485 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h 		unsigned int block);
block             510 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
block             511 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h 	switch (block) {
block             541 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h 		WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
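
The amdgpu_ras entries above treat each RAS block id as one bit in a mask: amdgpu_ras_is_supported() tests ras->supported & (1 << block), and the feature-enable path sets or clears the matching bit in con->features. A simplified standalone sketch of that bookkeeping, using shortened stand-in names for the block enum and context structure:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

enum ras_block { RAS_BLOCK_UMC, RAS_BLOCK_SDMA, RAS_BLOCK_GFX, RAS_BLOCK_COUNT };

struct ras_context {
	uint32_t supported;	/* one bit per block, filled from hardware caps */
	uint32_t features;	/* one bit per block, set when RAS is enabled on it */
};

static bool ras_is_supported(const struct ras_context *con, unsigned int block)
{
	if (block >= RAS_BLOCK_COUNT)
		return false;
	return con->supported & (1u << block);
}

static void ras_set_feature(struct ras_context *con, unsigned int block, bool enable)
{
	if (enable)
		con->features |= 1u << block;
	else
		con->features &= ~(1u << block);
}

int main(void)
{
	struct ras_context con = { .supported = 1u << RAS_BLOCK_UMC };

	ras_set_feature(&con, RAS_BLOCK_UMC, true);
	printf("UMC supported=%d enabled=%d\n",
	       ras_is_supported(&con, RAS_BLOCK_UMC),
	       !!(con.features & (1u << RAS_BLOCK_UMC)));
	return 0;
}
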
block            4431 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		.block = AMDGPU_RAS_BLOCK__GFX,
block            6080 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
block             638 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
block             648 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c 	       "write" : "read", block, mc_client, mc_id);
block             748 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
block             759 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 	       "write" : "read", block, mc_client, mc_id);
block             992 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
block            1003 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 	       "write" : "read", block, mc_client, mc_id);
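
The gmc_v6/v7/v8 entries above unpack the 32-bit VM-fault client id into a four-character string, one byte per character starting from the most significant byte; the rest of the initializer is truncated in the listing, so the sketch below assumes the remaining two bytes follow the same pattern. The sample value is made up.

#include <stdio.h>
#include <stdint.h>

static void decode_mc_client(uint32_t mc_client, char block[5])
{
	block[0] = mc_client >> 24;		/* most significant byte first */
	block[1] = (mc_client >> 16) & 0xff;
	block[2] = (mc_client >> 8) & 0xff;	/* assumed continuation of the pattern */
	block[3] = mc_client & 0xff;
	block[4] = '\0';
}

int main(void)
{
	char block[5];

	decode_mc_client(0x43423141, block);	/* made-up value, decodes to "CB1A" */
	printf("%s\n", block);
	return 0;
}
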
block             848 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
block             850 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
block             855 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
block             871 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 						ras_block->block);
block             891 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 					ras_block->block);
block             900 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
block             912 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
block             923 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
block             942 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		.block = AMDGPU_RAS_BLOCK__UMC,
block             952 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		.block = AMDGPU_RAS_BLOCK__MMHUB,
block            1709 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		.block = AMDGPU_RAS_BLOCK__SDMA,
block              39 drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c #define SRI(reg_name, block, id)\
block              40 drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c 	.reg_name = mm ## block ## id ## _ ## reg_name
block              41 drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c #define SRI(reg_name, block, id)\
block              42 drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c 	.reg_name = mm ## block ## id ## _ ## reg_name
block              43 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c #define CLK_REG(reg_name, block, inst)\
block              44 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c 	CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
block              45 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c 					mm ## block ## _ ## inst ## _ ## reg_name
block             138 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c #define SRI(reg_name, block, id)\
block             139 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	.reg_name = mm ## block ## id ## _ ## reg_name
block             450 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c #define SRII(reg_name, block, id)\
block             451 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 	.reg_name[id] = mm ## block ## id ## _ ## reg_name
block             149 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c #define SRI(reg_name, block, id)\
block             150 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	.reg_name = mm ## block ## id ## _ ## reg_name
block             492 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c #define SRII(reg_name, block, id)\
block             493 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 	.reg_name[id] = mm ## block ## id ## _ ## reg_name
block             148 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c #define SRI(reg_name, block, id)\
block             149 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	.reg_name = mm ## block ## id ## _ ## reg_name
block             470 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c #define SRII(reg_name, block, id)\
block             471 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 	.reg_name[id] = mm ## block ## id ## _ ## reg_name
block             141 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c #define SRI(reg_name, block, id)\
block             142 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             143 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 					mm ## block ## id ## _ ## reg_name
block             703 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c #define SRII(reg_name, block, id)\
block             704 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             705 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 					mm ## block ## id ## _ ## reg_name
block             155 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c #define SRI(reg_name, block, id)\
block             156 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	.reg_name = mm ## block ## id ## _ ## reg_name
block             562 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c #define SRII(reg_name, block, id)\
block             563 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 	.reg_name[id] = mm ## block ## id ## _ ## reg_name
block              40 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h #define SRI(reg_name, block, id)\
block              41 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              42 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 					mm ## block ## id ## _ ## reg_name
block              45 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h #define SRII(reg_name, block, id)\
block              46 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              47 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h 					mm ## block ## id ## _ ## reg_name
block             173 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c #define SRI(reg_name, block, id)\
block             174 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             175 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 					mm ## block ## id ## _ ## reg_name
block             178 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c #define SRII(reg_name, block, id)\
block             179 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             180 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c 					mm ## block ## id ## _ ## reg_name
block              41 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h #define SRI(reg_name, block, id)\
block              42 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              43 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 					mm ## block ## id ## _ ## reg_name
block              45 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h #define SRI2(reg_name, block, id)\
block              49 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h #define SRII(reg_name, block, id)\
block              50 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              51 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h 					mm ## block ## id ## _ ## reg_name
block              43 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h #define SRI(reg_name, block, id)\
block              44 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              45 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 					mm ## block ## id ## _ ## reg_name
block              47 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h #define SRI2(reg_name, block, id)\
block              51 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h #define SRII(reg_name, block, id)\
block              52 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              53 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h 					mm ## block ## id ## _ ## reg_name
block             427 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define SRI(reg_name, block, id)\
block             428 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             429 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## block ## id ## _ ## reg_name
block             431 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define SRIR(var_name, reg_name, block, id)\
block             432 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             433 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## block ## id ## _ ## reg_name
block             435 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define SRII(reg_name, block, id)\
block             436 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             437 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## block ## id ## _ ## reg_name
block             439 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define DCCG_SRII(reg_name, block, id)\
block             440 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             441 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 					mm ## block ## id ## _ ## reg_name
block              37 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h #define SRI(reg_name, block, id)\
block              38 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              39 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h 					mm ## block ## id ## _ ## reg_name
block             290 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define SRI(reg_name, block, id)\
block             291 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             292 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## block ## id ## _ ## reg_name
block             294 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define SRIR(var_name, reg_name, block, id)\
block             295 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             296 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## block ## id ## _ ## reg_name
block             298 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define SRII(reg_name, block, id)\
block             299 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             300 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## block ## id ## _ ## reg_name
block             302 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define DCCG_SRII(reg_name, block, id)\
block             303 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             304 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 					mm ## block ## id ## _ ## reg_name
block             173 drivers/gpu/drm/amd/display/dc/dm_services.h #define get_reg_field_value_soc15(reg_value, block, reg_num, reg_name, reg_field)\
block             176 drivers/gpu/drm/amd/display/dc/dm_services.h 		block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
block             177 drivers/gpu/drm/amd/display/dc/dm_services.h 		block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)
block             179 drivers/gpu/drm/amd/display/dc/dm_services.h #define set_reg_field_value_soc15(reg_value, value, block, reg_num, reg_name, reg_field)\
block             183 drivers/gpu/drm/amd/display/dc/dm_services.h 		block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
block             184 drivers/gpu/drm/amd/display/dc/dm_services.h 		block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)
block              48 drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c #define REGI(reg_name, block, id)\
block              49 drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c 	mm ## block ## id ## _ ## reg_name
block              63 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c #define REGI(reg_name, block, id)\
block              64 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              65 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c 				mm ## block ## id ## _ ## reg_name
block              54 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c #define REGI(reg_name, block, id)\
block              55 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              56 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c 				mm ## block ## id ## _ ## reg_name
block              60 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c #define REGI(reg_name, block, id)\
block              61 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              62 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c 				mm ## block ## id ## _ ## reg_name
block              54 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c #define REGI(reg_name, block, id)\
block              55 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              56 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c 				mm ## block ## id ## _ ## reg_name
block              66 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c #define REGI(reg_name, block, id)\
block              67 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              68 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c 				mm ## block ## id ## _ ## reg_name
block              64 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c #define REGI(reg_name, block, id)\
block              65 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block              66 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c 				mm ## block ## id ## _ ## reg_name
block              83 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h #define CLK_SRI(reg_name, block, inst)\
block              84 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h 	.reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
block              85 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h 					mm ## block ## _ ## inst ## _ ## reg_name
block             100 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c #define SRI(reg_name, block, id)\
block             101 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             102 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 			mm ## block ## id ## _ ## reg_name
block             105 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
block             106 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 	.enable_reg = SRI(reg1, block, reg_num),\
block             108 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
block             110 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
block             111 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 		~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
block             113 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 	.ack_reg = SRI(reg2, block, reg_num),\
block             115 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
block             117 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
block             181 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c #define SRI(reg_name, block, id)\
block             182 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             183 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 			mm ## block ## id ## _ ## reg_name
block             186 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
block             187 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 	.enable_reg = SRI(reg1, block, reg_num),\
block             189 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
block             191 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
block             192 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 		~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
block             194 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 	.ack_reg = SRI(reg2, block, reg_num),\
block             196 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
block             198 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c 		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
block             183 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c #define SRI(reg_name, block, id)\
block             184 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             185 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 			mm ## block ## id ## _ ## reg_name
block             188 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
block             189 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 	.enable_reg = SRI(reg1, block, reg_num),\
block             191 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
block             193 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
block             194 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 		~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
block             196 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 	.ack_reg = SRI(reg2, block, reg_num),\
block             198 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
block             200 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
block             179 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c #define SRI(reg_name, block, id)\
block             180 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 	BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
block             181 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 			mm ## block ## id ## _ ## reg_name
block             184 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
block             185 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 	.enable_reg = SRI(reg1, block, reg_num),\
block             187 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
block             189 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
block             190 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 		~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
block             192 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 	.ack_reg = SRI(reg2, block, reg_num),\
block             194 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
block             196 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
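
The display-code entries above define many variants of the same token-pasting idea: SRI()/SRII()/REGI() splice a block name, an instance id and a register name into the auto-generated mmBLOCKn_REG symbols and, for the resource tables, into a designated initializer of the same field name. A small compilable illustration with made-up register names and a made-up offset:

#include <stdio.h>

/* Hypothetical stand-in for the auto-generated register headers. */
#define mmCRTC0_CONTROL 0x1b9c	/* made-up offset, for illustration only */

struct crtc_registers {
	unsigned int CONTROL;
};

/* Same shape as the simplest SRI() macros listed above: paste block name,
 * instance id and register name into the generated symbol, and use the
 * register name as the designated-initializer field. */
#define SRI(reg_name, block, id) \
	.reg_name = mm ## block ## id ## _ ## reg_name

static const struct crtc_registers crtc0_regs = {
	SRI(CONTROL, CRTC, 0),	/* expands to .CONTROL = mmCRTC0_CONTROL */
};

int main(void)
{
	printf("CRTC0 CONTROL register offset: %#x\n", crtc0_regs.CONTROL);
	return 0;
}

The BASE()/SRII()/DCCG_SRII() variants above only add a per-block base index lookup or an array subscript on top of this same expansion.
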
block             214 drivers/gpu/drm/amd/include/kgd_pp_interface.h #define PP_CG_MSG_ID(group, block, support, state) \
block             215 drivers/gpu/drm/amd/include/kgd_pp_interface.h 		((group) << PP_GROUP_SHIFT | (block) << PP_BLOCK_SHIFT | \
block             405 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
block            1149 drivers/gpu/drm/arm/malidp_hw.c static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
block            1151 drivers/gpu/drm/arm/malidp_hw.c 	u32 base = malidp_get_block_base(hwdev, block);
block             288 drivers/gpu/drm/arm/malidp_hw.h 					u8 block)
block             290 drivers/gpu/drm/arm/malidp_hw.h 	switch (block) {
block             301 drivers/gpu/drm/arm/malidp_hw.h 					 u8 block, u32 irq)
block             303 drivers/gpu/drm/arm/malidp_hw.h 	u32 base = malidp_get_block_base(hwdev, block);
block             309 drivers/gpu/drm/arm/malidp_hw.h 					u8 block, u32 irq)
block             311 drivers/gpu/drm/arm/malidp_hw.h 	u32 base = malidp_get_block_base(hwdev, block);
block              74 drivers/gpu/drm/bochs/bochs_hw.c 				unsigned int block, size_t len)
block              77 drivers/gpu/drm/bochs/bochs_hw.c 	size_t i, start = block * EDID_LENGTH;
block             521 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
block             533 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c 	if (adv7511->current_edid_segment != block / 2) {
block             544 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c 				     block);
block             577 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c 		adv7511->current_edid_segment = block / 2;
block             580 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c 	if (block % 2 == 0)
block             197 drivers/gpu/drm/bridge/analogix/analogix_dp_core.h 				       enum analog_power_block block,
block             251 drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c 				       enum analog_power_block block,
block             261 drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c 	switch (block) {
block              73 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 	u8 *block = kmalloc(EDID_LENGTH, GFP_KERNEL);
block              85 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 			.buf	= block,
block              89 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 	if (!block)
block              97 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 	if (!drm_edid_block_valid(block, 0, false, NULL)) {
block             102 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 	total_size = (block[EDID_EXT_BLOCK_CNT] + 1) * EDID_LENGTH;
block             104 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 		kfree(block);
block             105 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 		block = kmalloc(total_size, GFP_KERNEL);
block             106 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 		if (!block)
block             114 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 		msgs[1].buf = block;
block             120 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 		if (!drm_edid_block_valid(block, 1, false, NULL)) {
block             126 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 	return block;
block             129 drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c 	kfree(block);
block            1381 drivers/gpu/drm/drm_edid.c bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
block            1393 drivers/gpu/drm/drm_edid.c 	if (block == 0) {
block            1502 drivers/gpu/drm/drm_edid.c drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
block            1505 drivers/gpu/drm/drm_edid.c 	unsigned char start = block * EDID_LENGTH;
block            1506 drivers/gpu/drm/drm_edid.c 	unsigned char segment = block >> 1;
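
The EDID entries above (bochs_hw.c, adv7511_drv.c and drm_edid.c) all derive the same addressing from a block number: 128-byte EDID blocks come two per 256-byte E-DDC segment, so block N is read from segment N >> 1 at byte offset (N & 1) * 128. A standalone sketch of that arithmetic:

#include <stdio.h>

#define EDID_LENGTH 128

struct edid_addr {
	unsigned int segment;	/* value written to the E-DDC segment pointer */
	unsigned int offset;	/* byte offset within that segment */
};

static struct edid_addr edid_block_address(unsigned int block)
{
	struct edid_addr a = {
		.segment = block >> 1,			/* two blocks per segment */
		.offset  = (block & 1) * EDID_LENGTH,	/* first or second half */
	};
	return a;
}

int main(void)
{
	for (unsigned int block = 0; block < 4; block++) {
		struct edid_addr a = edid_block_address(block);

		printf("block %u -> segment %u, offset %u\n",
		       block, a.segment, a.offset);
	}
	return 0;
}
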
block            1565 drivers/gpu/drm/drm_edid.c 		u8 *block = edid + i * EDID_LENGTH;
block            1568 drivers/gpu/drm/drm_edid.c 		if (drm_edid_is_zero(block, EDID_LENGTH))
block            1570 drivers/gpu/drm/drm_edid.c 		else if (!drm_edid_block_valid(block, i, false, NULL))
block            1577 drivers/gpu/drm/drm_edid.c 			       block, EDID_LENGTH, false);
block            1646 drivers/gpu/drm/drm_edid.c 	int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
block            1687 drivers/gpu/drm/drm_edid.c 		u8 *block = edid + j * EDID_LENGTH;
block            1690 drivers/gpu/drm/drm_edid.c 			if (get_edid_block(data, block, j, EDID_LENGTH))
block            1692 drivers/gpu/drm/drm_edid.c 			if (drm_edid_block_valid(block, j, false, NULL))
block            1715 drivers/gpu/drm/drm_edid.c 			u8 *block = edid + i * EDID_LENGTH;
block            1717 drivers/gpu/drm/drm_edid.c 			if (!drm_edid_block_valid(block, i, false, NULL))
block            1720 drivers/gpu/drm/drm_edid.c 			memcpy(base, block, EDID_LENGTH);
block            2945 drivers/gpu/drm/drm_edid.c 	struct displayid_block *block;
block            2964 drivers/gpu/drm/drm_edid.c 	for_each_displayid_db(displayid, block, idx, length) {
block            2965 drivers/gpu/drm/drm_edid.c 		if (block->tag == DATA_BLOCK_CTA) {
block            2966 drivers/gpu/drm/drm_edid.c 			cea = (u8 *)block;
block            4787 drivers/gpu/drm/drm_edid.c 					  struct displayid_block *block)
block            4789 drivers/gpu/drm/drm_edid.c 	struct displayid_detailed_timing_block *det = (struct displayid_detailed_timing_block *)block;
block            4795 drivers/gpu/drm/drm_edid.c 	if (block->num_bytes % 20)
block            4798 drivers/gpu/drm/drm_edid.c 	num_timings = block->num_bytes / 20;
block            4819 drivers/gpu/drm/drm_edid.c 	struct displayid_block *block;
block            4831 drivers/gpu/drm/drm_edid.c 	for_each_displayid_db(displayid, block, idx, length) {
block            4832 drivers/gpu/drm/drm_edid.c 		switch (block->tag) {
block            4834 drivers/gpu/drm/drm_edid.c 			num_modes += add_displayid_detailed_1_modes(connector, block);
block            5378 drivers/gpu/drm/drm_edid.c 				 struct displayid_block *block)
block            5380 drivers/gpu/drm/drm_edid.c 	struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
block            5437 drivers/gpu/drm/drm_edid.c 	struct displayid_block *block;
block            5448 drivers/gpu/drm/drm_edid.c 	for_each_displayid_db(displayid, block, idx, length) {
block            5450 drivers/gpu/drm/drm_edid.c 			      block->tag, block->rev, block->num_bytes);
block            5452 drivers/gpu/drm/drm_edid.c 		switch (block->tag) {
block            5454 drivers/gpu/drm/drm_edid.c 			ret = drm_parse_tiled_block(connector, block);
block            5465 drivers/gpu/drm/drm_edid.c 			DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
block            4632 drivers/gpu/drm/i915/display/intel_dp.c 		struct edid *block = intel_connector->detect_edid;
block            4637 drivers/gpu/drm/i915/display/intel_dp.c 		block += intel_connector->detect_edid->extensions;
block            4640 drivers/gpu/drm/i915/display/intel_dp.c 				       block->checksum) <= 0)
block            3291 drivers/gpu/drm/i915/gvt/handlers.c 	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
block            3295 drivers/gpu/drm/i915/gvt/handlers.c 	for (i = 0; i < num; i++, block++) {
block            3296 drivers/gpu/drm/i915/gvt/handlers.c 		if (!(device & block->device))
block            3298 drivers/gpu/drm/i915/gvt/handlers.c 		if (offset >= i915_mmio_reg_offset(block->offset) &&
block            3299 drivers/gpu/drm/i915/gvt/handlers.c 		    offset < i915_mmio_reg_offset(block->offset) + block->size)
block            3300 drivers/gpu/drm/i915/gvt/handlers.c 			return block;
block            3409 drivers/gpu/drm/i915/gvt/handlers.c 	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
block            3419 drivers/gpu/drm/i915/gvt/handlers.c 	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
block            3420 drivers/gpu/drm/i915/gvt/handlers.c 		for (j = 0; j < block->size; j += 4) {
block            3422 drivers/gpu/drm/i915/gvt/handlers.c 				      i915_mmio_reg_offset(block->offset) + j,
block              48 drivers/gpu/drm/i915/i915_buddy.c 	struct i915_buddy_block *block;
block              50 drivers/gpu/drm/i915/i915_buddy.c 	block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
block              51 drivers/gpu/drm/i915/i915_buddy.c 	if (!block)
block              54 drivers/gpu/drm/i915/i915_buddy.c 	block->header = offset;
block              55 drivers/gpu/drm/i915/i915_buddy.c 	block->header |= order;
block              56 drivers/gpu/drm/i915/i915_buddy.c 	block->parent = parent;
block              58 drivers/gpu/drm/i915/i915_buddy.c 	return block;
block              61 drivers/gpu/drm/i915/i915_buddy.c static void i915_block_free(struct i915_buddy_block *block)
block              63 drivers/gpu/drm/i915/i915_buddy.c 	kmem_cache_free(global.slab_blocks, block);
block              66 drivers/gpu/drm/i915/i915_buddy.c static void mark_allocated(struct i915_buddy_block *block)
block              68 drivers/gpu/drm/i915/i915_buddy.c 	block->header &= ~I915_BUDDY_HEADER_STATE;
block              69 drivers/gpu/drm/i915/i915_buddy.c 	block->header |= I915_BUDDY_ALLOCATED;
block              71 drivers/gpu/drm/i915/i915_buddy.c 	list_del(&block->link);
block              75 drivers/gpu/drm/i915/i915_buddy.c 		      struct i915_buddy_block *block)
block              77 drivers/gpu/drm/i915/i915_buddy.c 	block->header &= ~I915_BUDDY_HEADER_STATE;
block              78 drivers/gpu/drm/i915/i915_buddy.c 	block->header |= I915_BUDDY_FREE;
block              80 drivers/gpu/drm/i915/i915_buddy.c 	list_add(&block->link,
block              81 drivers/gpu/drm/i915/i915_buddy.c 		 &mm->free_list[i915_buddy_block_order(block)]);
block              84 drivers/gpu/drm/i915/i915_buddy.c static void mark_split(struct i915_buddy_block *block)
block              86 drivers/gpu/drm/i915/i915_buddy.c 	block->header &= ~I915_BUDDY_HEADER_STATE;
block              87 drivers/gpu/drm/i915/i915_buddy.c 	block->header |= I915_BUDDY_SPLIT;
block              89 drivers/gpu/drm/i915/i915_buddy.c 	list_del(&block->link);
block             187 drivers/gpu/drm/i915/i915_buddy.c 		       struct i915_buddy_block *block)
block             189 drivers/gpu/drm/i915/i915_buddy.c 	unsigned int block_order = i915_buddy_block_order(block) - 1;
block             190 drivers/gpu/drm/i915/i915_buddy.c 	u64 offset = i915_buddy_block_offset(block);
block             192 drivers/gpu/drm/i915/i915_buddy.c 	GEM_BUG_ON(!i915_buddy_block_is_free(block));
block             193 drivers/gpu/drm/i915/i915_buddy.c 	GEM_BUG_ON(!i915_buddy_block_order(block));
block             195 drivers/gpu/drm/i915/i915_buddy.c 	block->left = i915_block_alloc(block, block_order, offset);
block             196 drivers/gpu/drm/i915/i915_buddy.c 	if (!block->left)
block             199 drivers/gpu/drm/i915/i915_buddy.c 	block->right = i915_block_alloc(block, block_order,
block             201 drivers/gpu/drm/i915/i915_buddy.c 	if (!block->right) {
block             202 drivers/gpu/drm/i915/i915_buddy.c 		i915_block_free(block->left);
block             206 drivers/gpu/drm/i915/i915_buddy.c 	mark_free(mm, block->left);
block             207 drivers/gpu/drm/i915/i915_buddy.c 	mark_free(mm, block->right);
block             209 drivers/gpu/drm/i915/i915_buddy.c 	mark_split(block);
block             215 drivers/gpu/drm/i915/i915_buddy.c get_buddy(struct i915_buddy_block *block)
block             219 drivers/gpu/drm/i915/i915_buddy.c 	parent = block->parent;
block             223 drivers/gpu/drm/i915/i915_buddy.c 	if (parent->left == block)
block             230 drivers/gpu/drm/i915/i915_buddy.c 			      struct i915_buddy_block *block)
block             234 drivers/gpu/drm/i915/i915_buddy.c 	while ((parent = block->parent)) {
block             237 drivers/gpu/drm/i915/i915_buddy.c 		buddy = get_buddy(block);
block             244 drivers/gpu/drm/i915/i915_buddy.c 		i915_block_free(block);
block             247 drivers/gpu/drm/i915/i915_buddy.c 		block = parent;
block             250 drivers/gpu/drm/i915/i915_buddy.c 	mark_free(mm, block);
block             254 drivers/gpu/drm/i915/i915_buddy.c 		     struct i915_buddy_block *block)
block             256 drivers/gpu/drm/i915/i915_buddy.c 	GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
block             257 drivers/gpu/drm/i915/i915_buddy.c 	__i915_buddy_free(mm, block);
block             262 drivers/gpu/drm/i915/i915_buddy.c 	struct i915_buddy_block *block, *on;
block             264 drivers/gpu/drm/i915/i915_buddy.c 	list_for_each_entry_safe(block, on, objects, link)
block             265 drivers/gpu/drm/i915/i915_buddy.c 		i915_buddy_free(mm, block);
block             280 drivers/gpu/drm/i915/i915_buddy.c 	struct i915_buddy_block *block = NULL;
block             285 drivers/gpu/drm/i915/i915_buddy.c 		block = list_first_entry_or_null(&mm->free_list[i],
block             288 drivers/gpu/drm/i915/i915_buddy.c 		if (block)
block             292 drivers/gpu/drm/i915/i915_buddy.c 	if (!block)
block             295 drivers/gpu/drm/i915/i915_buddy.c 	GEM_BUG_ON(!i915_buddy_block_is_free(block));
block             298 drivers/gpu/drm/i915/i915_buddy.c 		err = split_block(mm, block);
block             303 drivers/gpu/drm/i915/i915_buddy.c 		block = block->left;
block             307 drivers/gpu/drm/i915/i915_buddy.c 	mark_allocated(block);
block             308 drivers/gpu/drm/i915/i915_buddy.c 	kmemleak_update_trace(block);
block             309 drivers/gpu/drm/i915/i915_buddy.c 	return block;
block             312 drivers/gpu/drm/i915/i915_buddy.c 	__i915_buddy_free(mm, block);
block             340 drivers/gpu/drm/i915/i915_buddy.c 	struct i915_buddy_block *block;
block             366 drivers/gpu/drm/i915/i915_buddy.c 		block = list_first_entry_or_null(&dfs,
block             369 drivers/gpu/drm/i915/i915_buddy.c 		if (!block)
block             372 drivers/gpu/drm/i915/i915_buddy.c 		list_del(&block->tmp_link);
block             374 drivers/gpu/drm/i915/i915_buddy.c 		block_start = i915_buddy_block_offset(block);
block             375 drivers/gpu/drm/i915/i915_buddy.c 		block_end = block_start + i915_buddy_block_size(mm, block) - 1;
block             380 drivers/gpu/drm/i915/i915_buddy.c 		if (i915_buddy_block_is_allocated(block)) {
block             386 drivers/gpu/drm/i915/i915_buddy.c 			if (!i915_buddy_block_is_free(block)) {
block             391 drivers/gpu/drm/i915/i915_buddy.c 			mark_allocated(block);
block             392 drivers/gpu/drm/i915/i915_buddy.c 			list_add_tail(&block->link, &allocated);
block             396 drivers/gpu/drm/i915/i915_buddy.c 		if (!i915_buddy_block_is_split(block)) {
block             397 drivers/gpu/drm/i915/i915_buddy.c 			err = split_block(mm, block);
block             402 drivers/gpu/drm/i915/i915_buddy.c 		list_add(&block->right->tmp_link, &dfs);
block             403 drivers/gpu/drm/i915/i915_buddy.c 		list_add(&block->left->tmp_link, &dfs);
block             415 drivers/gpu/drm/i915/i915_buddy.c 	buddy = get_buddy(block);
block             417 drivers/gpu/drm/i915/i915_buddy.c 	    (i915_buddy_block_is_free(block) &&
block             419 drivers/gpu/drm/i915/i915_buddy.c 		__i915_buddy_free(mm, block);
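
During the free/coalesce walk shown in the i915_buddy.c entries above, get_buddy() returns a block's sibling: whichever child of the parent the block itself is not, or nothing for a root block. A minimal standalone sketch of that lookup:

#include <stddef.h>
#include <stdio.h>

/* Simplified block node: only the tree links needed for the sibling lookup. */
struct buddy_block {
	struct buddy_block *parent;
	struct buddy_block *left;
	struct buddy_block *right;
};

static struct buddy_block *get_buddy(struct buddy_block *block)
{
	struct buddy_block *parent = block->parent;

	if (!parent)
		return NULL;	/* root blocks have no buddy */

	return parent->left == block ? parent->right : parent->left;
}

int main(void)
{
	struct buddy_block parent = { 0 }, left = { 0 }, right = { 0 };

	left.parent = right.parent = &parent;
	parent.left = &left;
	parent.right = &right;

	printf("buddy of left is right: %d\n", get_buddy(&left) == &right);
	printf("root has no buddy: %d\n", get_buddy(&parent) == NULL);
	return 0;
}
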
block              71 drivers/gpu/drm/i915/i915_buddy.h i915_buddy_block_offset(struct i915_buddy_block *block)
block              73 drivers/gpu/drm/i915/i915_buddy.h 	return block->header & I915_BUDDY_HEADER_OFFSET;
block              77 drivers/gpu/drm/i915/i915_buddy.h i915_buddy_block_order(struct i915_buddy_block *block)
block              79 drivers/gpu/drm/i915/i915_buddy.h 	return block->header & I915_BUDDY_HEADER_ORDER;
block              83 drivers/gpu/drm/i915/i915_buddy.h i915_buddy_block_state(struct i915_buddy_block *block)
block              85 drivers/gpu/drm/i915/i915_buddy.h 	return block->header & I915_BUDDY_HEADER_STATE;
block              89 drivers/gpu/drm/i915/i915_buddy.h i915_buddy_block_is_allocated(struct i915_buddy_block *block)
block              91 drivers/gpu/drm/i915/i915_buddy.h 	return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
block              95 drivers/gpu/drm/i915/i915_buddy.h i915_buddy_block_is_free(struct i915_buddy_block *block)
block              97 drivers/gpu/drm/i915/i915_buddy.h 	return i915_buddy_block_state(block) == I915_BUDDY_FREE;
block             101 drivers/gpu/drm/i915/i915_buddy.h i915_buddy_block_is_split(struct i915_buddy_block *block)
block             103 drivers/gpu/drm/i915/i915_buddy.h 	return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
block             108 drivers/gpu/drm/i915/i915_buddy.h 		      struct i915_buddy_block *block)
block             110 drivers/gpu/drm/i915/i915_buddy.h 	return mm->chunk_size << i915_buddy_block_order(block);
block             124 drivers/gpu/drm/i915/i915_buddy.h void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
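
The i915_buddy.h helpers listed above all decode one packed word: block->header carries the block's offset, order and allocation state, and the block size follows as mm->chunk_size << order. Below is a minimal userspace sketch of that packing scheme; the mask values are invented for illustration and are not the driver's real I915_BUDDY_HEADER_* definitions.

	#include <stdint.h>
	#include <stdio.h>

	/* Invented layout for illustration: low 2 bits = state, next 10 bits
	 * = order, remaining high bits = offset (already chunk-aligned).
	 * The real masks live in i915_buddy.h and differ. */
	#define HDR_STATE   0x3ull
	#define HDR_ORDER   (0x3ffull << 2)
	#define HDR_OFFSET  (~(HDR_STATE | HDR_ORDER))

	enum { STATE_ALLOCATED = 1, STATE_FREE = 2, STATE_SPLIT = 3 };

	static uint64_t block_state(uint64_t header)  { return header & HDR_STATE; }
	static uint64_t block_order(uint64_t header)  { return (header & HDR_ORDER) >> 2; }
	static uint64_t block_offset(uint64_t header) { return header & HDR_OFFSET; }

	static uint64_t block_size(uint64_t chunk_size, uint64_t header)
	{
		return chunk_size << block_order(header); /* size doubles per order */
	}

	int main(void)
	{
		uint64_t chunk = 4096;	/* smallest allocatable chunk */
		/* offset 1 MiB, order 3, currently free */
		uint64_t hdr = (1ull << 20) | (3ull << 2) | STATE_FREE;

		printf("offset=%llu order=%llu state=%llu size=%llu\n",
		       (unsigned long long)block_offset(hdr),
		       (unsigned long long)block_order(hdr),
		       (unsigned long long)block_state(hdr),
		       (unsigned long long)block_size(chunk, hdr));
		return 0;
	}
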
block              14 drivers/gpu/drm/i915/selftests/i915_buddy.c 			     struct i915_buddy_block *block,
block              18 drivers/gpu/drm/i915/selftests/i915_buddy.c 	       block->header,
block              19 drivers/gpu/drm/i915/selftests/i915_buddy.c 	       i915_buddy_block_state(block),
block              20 drivers/gpu/drm/i915/selftests/i915_buddy.c 	       i915_buddy_block_order(block),
block              21 drivers/gpu/drm/i915/selftests/i915_buddy.c 	       i915_buddy_block_offset(block),
block              22 drivers/gpu/drm/i915/selftests/i915_buddy.c 	       i915_buddy_block_size(mm, block),
block              23 drivers/gpu/drm/i915/selftests/i915_buddy.c 	       yesno(!block->parent),
block              28 drivers/gpu/drm/i915/selftests/i915_buddy.c 			   struct i915_buddy_block *block)
block              32 drivers/gpu/drm/i915/selftests/i915_buddy.c 	__igt_dump_block(mm, block, false);
block              34 drivers/gpu/drm/i915/selftests/i915_buddy.c 	buddy = get_buddy(block);
block              40 drivers/gpu/drm/i915/selftests/i915_buddy.c 			   struct i915_buddy_block *block)
block              48 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block_state = i915_buddy_block_state(block);
block              57 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block_size = i915_buddy_block_size(mm, block);
block              58 drivers/gpu/drm/i915/selftests/i915_buddy.c 	offset = i915_buddy_block_offset(block);
block              85 drivers/gpu/drm/i915/selftests/i915_buddy.c 	buddy = get_buddy(block);
block              87 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (!buddy && block->parent) {
block             118 drivers/gpu/drm/i915/selftests/i915_buddy.c 	struct i915_buddy_block *block;
block             123 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block = NULL;
block             127 drivers/gpu/drm/i915/selftests/i915_buddy.c 	list_for_each_entry(block, blocks, link) {
block             128 drivers/gpu/drm/i915/selftests/i915_buddy.c 		err = igt_check_block(mm, block);
block             130 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (!i915_buddy_block_is_allocated(block)) {
block             142 drivers/gpu/drm/i915/selftests/i915_buddy.c 			offset = i915_buddy_block_offset(block);
block             153 drivers/gpu/drm/i915/selftests/i915_buddy.c 		total += i915_buddy_block_size(mm, block);
block             154 drivers/gpu/drm/i915/selftests/i915_buddy.c 		prev = block;
block             171 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (block) {
block             173 drivers/gpu/drm/i915/selftests/i915_buddy.c 		igt_dump_block(mm, block);
block             203 drivers/gpu/drm/i915/selftests/i915_buddy.c 		struct i915_buddy_block *block;
block             244 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = list_first_entry_or_null(&mm->free_list[order],
block             247 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (block != root) {
block             317 drivers/gpu/drm/i915/selftests/i915_buddy.c 		struct i915_buddy_block *block;
block             335 drivers/gpu/drm/i915/selftests/i915_buddy.c 			block = i915_buddy_alloc(&mm, order);
block             336 drivers/gpu/drm/i915/selftests/i915_buddy.c 			if (IS_ERR(block)) {
block             337 drivers/gpu/drm/i915/selftests/i915_buddy.c 				err = PTR_ERR(block);
block             354 drivers/gpu/drm/i915/selftests/i915_buddy.c 			list_add_tail(&block->link, &blocks);
block             356 drivers/gpu/drm/i915/selftests/i915_buddy.c 			if (i915_buddy_block_order(block) != order) {
block             362 drivers/gpu/drm/i915/selftests/i915_buddy.c 			total += i915_buddy_block_size(&mm, block);
block             391 drivers/gpu/drm/i915/selftests/i915_buddy.c 	struct i915_buddy_block *block, *bn;
block             411 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
block             412 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (IS_ERR(block)) {
block             415 drivers/gpu/drm/i915/selftests/i915_buddy.c 			err = PTR_ERR(block);
block             419 drivers/gpu/drm/i915/selftests/i915_buddy.c 		list_add_tail(&block->link, &blocks);
block             423 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block = i915_buddy_alloc(&mm, 0);
block             424 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (IS_ERR(block)) {
block             426 drivers/gpu/drm/i915/selftests/i915_buddy.c 		err = PTR_ERR(block);
block             429 drivers/gpu/drm/i915/selftests/i915_buddy.c 	list_add_tail(&block->link, &blocks);
block             433 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
block             434 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (!IS_ERR(block)) {
block             437 drivers/gpu/drm/i915/selftests/i915_buddy.c 			list_add_tail(&block->link, &blocks);
block             443 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block = list_last_entry(&blocks, typeof(*block), link);
block             444 drivers/gpu/drm/i915/selftests/i915_buddy.c 	list_del(&block->link);
block             445 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_free(&mm, block);
block             449 drivers/gpu/drm/i915/selftests/i915_buddy.c 	list_for_each_entry_safe(block, bn, &blocks, link) {
block             450 drivers/gpu/drm/i915/selftests/i915_buddy.c 		list_del(&block->link);
block             451 drivers/gpu/drm/i915/selftests/i915_buddy.c 		i915_buddy_free(&mm, block);
block             453 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
block             454 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (IS_ERR(block)) {
block             457 drivers/gpu/drm/i915/selftests/i915_buddy.c 			err = PTR_ERR(block);
block             460 drivers/gpu/drm/i915/selftests/i915_buddy.c 		i915_buddy_free(&mm, block);
block             465 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block = i915_buddy_alloc(&mm, max_order);
block             466 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (IS_ERR(block)) {
block             469 drivers/gpu/drm/i915/selftests/i915_buddy.c 		err = PTR_ERR(block);
block             472 drivers/gpu/drm/i915/selftests/i915_buddy.c 	i915_buddy_free(&mm, block);
block             483 drivers/gpu/drm/i915/selftests/i915_buddy.c 	struct i915_buddy_block *block;
block             504 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
block             505 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (IS_ERR(block)) {
block             508 drivers/gpu/drm/i915/selftests/i915_buddy.c 			err = PTR_ERR(block);
block             512 drivers/gpu/drm/i915/selftests/i915_buddy.c 		list_add_tail(&block->link, &blocks);
block             516 drivers/gpu/drm/i915/selftests/i915_buddy.c 	block = i915_buddy_alloc(&mm, 0);
block             517 drivers/gpu/drm/i915/selftests/i915_buddy.c 	if (!IS_ERR(block)) {
block             519 drivers/gpu/drm/i915/selftests/i915_buddy.c 		list_add_tail(&block->link, &blocks);
block             533 drivers/gpu/drm/i915/selftests/i915_buddy.c 	struct i915_buddy_block *block;
block             556 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = list_first_entry_or_null(&blocks, typeof(*block), link);
block             557 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (block) {
block             558 drivers/gpu/drm/i915/selftests/i915_buddy.c 			list_del(&block->link);
block             559 drivers/gpu/drm/i915/selftests/i915_buddy.c 			i915_buddy_free(&mm, block);
block             563 drivers/gpu/drm/i915/selftests/i915_buddy.c 			block = i915_buddy_alloc(&mm, order);
block             564 drivers/gpu/drm/i915/selftests/i915_buddy.c 			if (IS_ERR(block)) {
block             567 drivers/gpu/drm/i915/selftests/i915_buddy.c 				err = PTR_ERR(block);
block             570 drivers/gpu/drm/i915/selftests/i915_buddy.c 			list_add_tail(&block->link, &blocks);
block             574 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, 0);
block             575 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (IS_ERR(block)) {
block             577 drivers/gpu/drm/i915/selftests/i915_buddy.c 			err = PTR_ERR(block);
block             580 drivers/gpu/drm/i915/selftests/i915_buddy.c 		list_add_tail(&block->link, &holes);
block             582 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, top);
block             583 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (!IS_ERR(block)) {
block             586 drivers/gpu/drm/i915/selftests/i915_buddy.c 			list_add_tail(&block->link, &blocks);
block             596 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
block             597 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (!IS_ERR(block)) {
block             600 drivers/gpu/drm/i915/selftests/i915_buddy.c 			list_add_tail(&block->link, &blocks);
block             644 drivers/gpu/drm/i915/selftests/i915_buddy.c 		struct i915_buddy_block *block;
block             662 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = list_first_entry_or_null(&tmp,
block             665 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (!block) {
block             671 drivers/gpu/drm/i915/selftests/i915_buddy.c 		if (i915_buddy_block_offset(block) != offset) {
block             673 drivers/gpu/drm/i915/selftests/i915_buddy.c 			       i915_buddy_block_offset(block), offset);
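
The selftest lines above repeat one pattern: call i915_buddy_alloc() for a given order, check IS_ERR()/PTR_ERR(), chain the result onto a local list through block->link, and later hand everything back with i915_buddy_free() while walking the list with list_for_each_entry_safe(). A kernel-context sketch of that pattern follows; it assumes an already-initialised struct i915_buddy_mm, compiles only against the i915 headers, and the helper name demo_alloc_all_of_order is made up.

	/* Sketch only: mirrors the allocate/track/free pattern in the selftests. */
	static int demo_alloc_all_of_order(struct i915_buddy_mm *mm, unsigned int order)
	{
		struct i915_buddy_block *block, *bn;
		LIST_HEAD(blocks);
		int err = 0;

		for (;;) {
			block = i915_buddy_alloc(mm, order);
			if (IS_ERR(block)) {
				/* The selftests treat "no space left at this
				 * order" as the expected stop condition and
				 * everything else as a real failure; here we
				 * simply stop either way. */
				err = PTR_ERR(block);
				break;
			}
			list_add_tail(&block->link, &blocks);
		}

		/* Hand every block back, detaching it from our list first. */
		list_for_each_entry_safe(block, bn, &blocks, link) {
			list_del(&block->link);
			i915_buddy_free(mm, block);
		}

		return err;
	}
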
block             156 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
block             160 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
block             183 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
block             187 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
block             284 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		const struct a6xx_debugbus_block *block,
block             290 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
block             294 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	obj->handle = block;
block             296 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	for (ptr = obj->data, i = 0; i < block->count; i++)
block             297 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		ptr += debugbus_read(gpu, block->id, i, ptr);
block             302 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		const struct a6xx_debugbus_block *block,
block             308 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
block             312 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	obj->handle = block;
block             314 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	for (ptr = obj->data, i = 0; i < block->count; i++)
block             315 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
block             566 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		const struct a6xx_shader_block *block,
block             571 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
block             579 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 			(block->type << 8) | i);
block             582 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 			block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
block             590 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	obj->handle = block;
block             986 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	const struct a6xx_shader_block *block = obj->handle;
block             992 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	print_name(p, "  - type: ", block->name);
block             996 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		drm_printf(p, "      size: %d\n", block->size);
block            1001 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		print_ascii85(p, block->size << 2,
block            1002 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 			obj->data + (block->size * i));
block            1070 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
block            1073 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	if (block) {
block            1074 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		print_name(p, "  - debugbus-block: ", block->name);
block            1080 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		drm_printf(p, "    count: %d\n", block->count << 1);
block            1082 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		print_ascii85(p, block->count << 3, data);
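
The a6xx snapshot code above sizes each capture buffer from block->count, then reads the block one offset at a time and advances the destination pointer by however many 32-bit words the read produced (two per offset, judging by the count << 1 / count << 3 lines in the show path). Here is a self-contained model of that gather loop; debugbus_read() below is a stand-in that fakes its data rather than the driver's real register helper.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the driver helper: pretend every offset yields two words. */
	static int debugbus_read(uint32_t block_id, uint32_t offset, uint32_t *out)
	{
		out[0] = (block_id << 16) | offset;	/* fake payload */
		out[1] = ~out[0];
		return 2;				/* words written */
	}

	/* Gather one block; the caller provides room for count * 2 words. */
	static size_t capture_block(uint32_t block_id, uint32_t count, uint32_t *data)
	{
		uint32_t *ptr = data;
		uint32_t i;

		for (i = 0; i < count; i++)
			ptr += debugbus_read(block_id, i, ptr);

		return (size_t)(ptr - data);
	}

	int main(void)
	{
		uint32_t buf[8 * 2];

		printf("captured %zu words\n", capture_block(0x1a, 8, buf));
		return 0;
	}
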
block              34 drivers/gpu/drm/nouveau/include/nvkm/core/mm.h int  nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
block              13 drivers/gpu/drm/nouveau/include/nvkm/core/notify.h 	int block;
block             240 drivers/gpu/drm/nouveau/nvkm/core/mm.c nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
block             257 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		BUG_ON(block != mm->block_size);
block             261 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		mm->block_size = block;
block              30 drivers/gpu/drm/nouveau/nvkm/core/notify.c 	if (notify->block++ == 0)
block              52 drivers/gpu/drm/nouveau/nvkm/core/notify.c 	if (--notify->block == 0)
block             100 drivers/gpu/drm/nouveau/nvkm/core/notify.c 	if (notify->block) {
block             143 drivers/gpu/drm/nouveau/nvkm/core/notify.c 			notify->block = 1;
block             635 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	u64 block;
block             653 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				block = (part >> page[i].shift) << page[i].shift;
block             655 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				block = (size >> page[i].shift) << page[i].shift;
block             657 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			block = (size >> page[i].shift) << page[i].shift;
block             662 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
block             669 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
block             672 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		size -= block;
block             673 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		addr += block;
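
In the nvkm vmm lines above, block is repeatedly computed as (size >> page[i].shift) << page[i].shift, i.e. the remaining size rounded down to a whole number of pages at the current page level, and is then used to advance addr and shrink size. A tiny self-contained illustration of that round-down step follows; the sizes and shift are arbitrary examples.

	#include <stdint.h>
	#include <stdio.h>

	/* Round a length down to a multiple of 1 << shift, the
	 * "(size >> shift) << shift" idiom used in vmm.c above. */
	static uint64_t aligned_run(uint64_t size, unsigned int shift)
	{
		return (size >> shift) << shift;
	}

	int main(void)
	{
		uint64_t size = 0x12345;	/* bytes still to map */
		unsigned int shift = 12;	/* 4 KiB pages at this level */
		uint64_t block = aligned_run(size, shift);

		printf("map 0x%llx now, leave 0x%llx for a smaller page size\n",
		       (unsigned long long)block,
		       (unsigned long long)(size - block));
		return 0;
	}
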
block             516 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c int tiler_pin(struct tiler_block *block, struct page **pages,
block             521 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	ret = fill(&block->area, pages, npages, roll, wait);
block             524 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		tiler_unpin(block);
block             529 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c int tiler_unpin(struct tiler_block *block)
block             531 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	return fill(&block->area, NULL, 0, 0, false);
block             540 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct tiler_block *block;
block             546 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block             547 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	if (!block)
block             562 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	block->fmt = fmt;
block             565 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			&block->area);
block             567 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		kfree(block);
block             573 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	list_add(&block->alloc_node, &omap_dmm->alloc_head);
block             576 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	return block;
block             581 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
block             585 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	if (!block)
block             588 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	block->fmt = TILFMT_PAGE;
block             591 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 				&block->area)) {
block             592 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		kfree(block);
block             597 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	list_add(&block->alloc_node, &omap_dmm->alloc_head);
block             600 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	return block;
block             604 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c int tiler_release(struct tiler_block *block)
block             606 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	int ret = tcm_free(&block->area);
block             609 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	if (block->area.tcm)
block             613 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	list_del(&block->alloc_node);
block             616 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	kfree(block);
block             676 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c dma_addr_t tiler_ssptr(struct tiler_block *block)
block             678 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	BUG_ON(!validfmt(block->fmt));
block             680 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
block             681 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			block->area.p0.x * geom[block->fmt].slot_w,
block             682 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			block->area.p0.y * geom[block->fmt].slot_h);
block             685 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
block             688 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct tcm_pt *p = &block->area.p0;
block             689 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	BUG_ON(!validfmt(block->fmt));
block             691 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	return tiler_get_address(block->fmt, orient,
block             692 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			(p->x * geom[block->fmt].slot_w) + x,
block             693 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			(p->y * geom[block->fmt].slot_h) + y);
block             737 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct tiler_block *block, *_block;
block             748 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
block             750 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			list_del(&block->alloc_node);
block             751 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			kfree(block);
block            1079 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct tiler_block *block;
block            1117 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
block            1118 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
block            1119 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 				if (block->fmt != TILFMT_PAGE) {
block            1120 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 					fill_map(map, xdiv, ydiv, &block->area,
block            1127 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 							&block->area);
block            1130 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 						ydiv, &block->area.p0) == ' ';
block            1132 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 							&block->area.p1) == ' ';
block            1134 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 					tcm_for_each_slice(a, block->area, p)
block            1138 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 							&block->area.p0,
block            1141 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 							&block->area.p1,
block            1144 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 							&block->area);
block              90 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h int tiler_pin(struct tiler_block *block, struct page **pages,
block              92 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h int tiler_unpin(struct tiler_block *block);
block              98 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h int tiler_release(struct tiler_block *block);
block             101 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h dma_addr_t tiler_ssptr(struct tiler_block *block);
block             102 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
block              81 drivers/gpu/drm/omapdrm/omap_gem.c 	struct tiler_block *block;
block             113 drivers/gpu/drm/omapdrm/omap_gem.c 	struct tiler_block *block;	/* the reserved tiler block */
block             450 drivers/gpu/drm/omapdrm/omap_gem.c 	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
block             660 drivers/gpu/drm/omapdrm/omap_gem.c 	if (omap_obj->block) {
block             665 drivers/gpu/drm/omapdrm/omap_gem.c 		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
block             779 drivers/gpu/drm/omapdrm/omap_gem.c 			struct tiler_block *block;
block             781 drivers/gpu/drm/omapdrm/omap_gem.c 			BUG_ON(omap_obj->block);
block             788 drivers/gpu/drm/omapdrm/omap_gem.c 				block = tiler_reserve_2d(fmt,
block             792 drivers/gpu/drm/omapdrm/omap_gem.c 				block = tiler_reserve_1d(obj->size);
block             795 drivers/gpu/drm/omapdrm/omap_gem.c 			if (IS_ERR(block)) {
block             796 drivers/gpu/drm/omapdrm/omap_gem.c 				ret = PTR_ERR(block);
block             803 drivers/gpu/drm/omapdrm/omap_gem.c 			ret = tiler_pin(block, omap_obj->pages, npages,
block             806 drivers/gpu/drm/omapdrm/omap_gem.c 				tiler_release(block);
block             812 drivers/gpu/drm/omapdrm/omap_gem.c 			omap_obj->dma_addr = tiler_ssptr(block);
block             813 drivers/gpu/drm/omapdrm/omap_gem.c 			omap_obj->block = block;
block             852 drivers/gpu/drm/omapdrm/omap_gem.c 			ret = tiler_unpin(omap_obj->block);
block             857 drivers/gpu/drm/omapdrm/omap_gem.c 			ret = tiler_release(omap_obj->block);
block             863 drivers/gpu/drm/omapdrm/omap_gem.c 			omap_obj->block = NULL;
block             882 drivers/gpu/drm/omapdrm/omap_gem.c 	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
block             884 drivers/gpu/drm/omapdrm/omap_gem.c 		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
block             996 drivers/gpu/drm/omapdrm/omap_gem.c 		if (omap_obj->block) {
block            1001 drivers/gpu/drm/omapdrm/omap_gem.c 			ret = tiler_pin(omap_obj->block,
block            1038 drivers/gpu/drm/omapdrm/omap_gem.c 		if (omap_obj->block) {
block            1039 drivers/gpu/drm/omapdrm/omap_gem.c 			struct tcm_area *area = &omap_obj->block->area;
block            1350 drivers/gpu/drm/omapdrm/omap_gem.c 			struct tiler_block *block;
block            1353 drivers/gpu/drm/omapdrm/omap_gem.c 			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
block            1354 drivers/gpu/drm/omapdrm/omap_gem.c 			if (IS_ERR(block)) {
block            1357 drivers/gpu/drm/omapdrm/omap_gem.c 						i, j, PTR_ERR(block));
block            1360 drivers/gpu/drm/omapdrm/omap_gem.c 			entry->dma_addr = tiler_ssptr(block);
block            1361 drivers/gpu/drm/omapdrm/omap_gem.c 			entry->block = block;
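
omap_gem.c above drives the tiler as a reserve, pin, translate, unpin, release lifecycle: tiler_reserve_2d()/tiler_reserve_1d() claims container space, tiler_pin() maps the object's pages into it, tiler_ssptr() yields the device-visible DMA address, and teardown goes through tiler_unpin() and tiler_release(). A kernel-context sketch of that lifecycle follows; the demo_* wrappers are invented, error handling is trimmed, and it assumes the omap_dmm_tiler.h API listed above.

	/* Sketch of the tiler block lifecycle seen in omap_gem.c. */
	static int demo_map_through_tiler(enum tiler_fmt fmt, u16 w, u16 h,
					  struct page **pages, u32 npages,
					  dma_addr_t *dma_addr,
					  struct tiler_block **out)
	{
		struct tiler_block *block;
		int ret;

		block = tiler_reserve_2d(fmt, w, h, PAGE_SIZE);
		if (IS_ERR(block))
			return PTR_ERR(block);

		ret = tiler_pin(block, pages, npages, 0, true);
		if (ret) {
			tiler_release(block);
			return ret;
		}

		*dma_addr = tiler_ssptr(block);		/* device-visible address */
		*out = block;
		return 0;
	}

	static void demo_unmap_through_tiler(struct tiler_block *block)
	{
		tiler_unpin(block);			/* unmap the pages */
		tiler_release(block);			/* give back the area */
	}
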
block             179 drivers/gpu/drm/radeon/ci_dpm.c 			  u32 block, bool enable);
block            5670 drivers/gpu/drm/radeon/cik.c 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
block            5681 drivers/gpu/drm/radeon/cik.c 	       block, mc_client, mc_id);
block            6294 drivers/gpu/drm/radeon/cik.c 		   u32 block, bool enable)
block            6297 drivers/gpu/drm/radeon/cik.c 	if (block & RADEON_CG_BLOCK_GFX) {
block            6310 drivers/gpu/drm/radeon/cik.c 	if (block & RADEON_CG_BLOCK_MC) {
block            6317 drivers/gpu/drm/radeon/cik.c 	if (block & RADEON_CG_BLOCK_SDMA) {
block            6322 drivers/gpu/drm/radeon/cik.c 	if (block & RADEON_CG_BLOCK_BIF) {
block            6326 drivers/gpu/drm/radeon/cik.c 	if (block & RADEON_CG_BLOCK_UVD) {
block            6331 drivers/gpu/drm/radeon/cik.c 	if (block & RADEON_CG_BLOCK_HDP) {
block            6336 drivers/gpu/drm/radeon/cik.c 	if (block & RADEON_CG_BLOCK_VCE) {
block              66 drivers/gpu/drm/radeon/kv_dpm.c 			  u32 block, bool enable);
block            2539 drivers/gpu/drm/radeon/ni.c 	char *block;
block            2550 drivers/gpu/drm/radeon/ni.c 		block = "CB";
block            2560 drivers/gpu/drm/radeon/ni.c 		block = "CB_FMASK";
block            2570 drivers/gpu/drm/radeon/ni.c 		block = "CB_CMASK";
block            2580 drivers/gpu/drm/radeon/ni.c 		block = "CB_IMMED";
block            2590 drivers/gpu/drm/radeon/ni.c 		block = "DB";
block            2600 drivers/gpu/drm/radeon/ni.c 		block = "DB_HTILE";
block            2610 drivers/gpu/drm/radeon/ni.c 		block = "SX";
block            2620 drivers/gpu/drm/radeon/ni.c 		block = "DB_STEN";
block            2630 drivers/gpu/drm/radeon/ni.c 		block = "TC_TFETCH";
block            2640 drivers/gpu/drm/radeon/ni.c 		block = "TC_VFETCH";
block            2650 drivers/gpu/drm/radeon/ni.c 		block = "VC";
block            2653 drivers/gpu/drm/radeon/ni.c 		block = "CP";
block            2657 drivers/gpu/drm/radeon/ni.c 		block = "SH";
block            2660 drivers/gpu/drm/radeon/ni.c 		block = "VGT";
block            2663 drivers/gpu/drm/radeon/ni.c 		block = "IH";
block            2666 drivers/gpu/drm/radeon/ni.c 		block = "RLC";
block            2669 drivers/gpu/drm/radeon/ni.c 		block = "DMA";
block            2672 drivers/gpu/drm/radeon/ni.c 		block = "HDP";
block            2675 drivers/gpu/drm/radeon/ni.c 		block = "unknown";
block            2682 drivers/gpu/drm/radeon/ni.c 	       block, mc_id);
block             812 drivers/gpu/drm/radeon/radeon.h void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
block             813 drivers/gpu/drm/radeon/radeon.h void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
block              30 drivers/gpu/drm/radeon/radeon_audio.h #define RREG32_ENDPOINT(block, reg)		\
block              31 drivers/gpu/drm/radeon/radeon_audio.h 	radeon_audio_endpoint_rreg(rdev, (block), (reg))
block              32 drivers/gpu/drm/radeon/radeon_audio.h #define WREG32_ENDPOINT(block, reg, v)	\
block              33 drivers/gpu/drm/radeon/radeon_audio.h 	radeon_audio_endpoint_wreg(rdev, (block), (reg), (v))
block             470 drivers/gpu/drm/radeon/radeon_irq_kms.c void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
block             478 drivers/gpu/drm/radeon/radeon_irq_kms.c 	rdev->irq.afmt[block] = true;
block             492 drivers/gpu/drm/radeon/radeon_irq_kms.c void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
block             500 drivers/gpu/drm/radeon/radeon_irq_kms.c 	rdev->irq.afmt[block] = false;
block            4827 drivers/gpu/drm/radeon/si.c 	char *block;
block            4839 drivers/gpu/drm/radeon/si.c 			block = "CB";
block            4849 drivers/gpu/drm/radeon/si.c 			block = "CB_FMASK";
block            4859 drivers/gpu/drm/radeon/si.c 			block = "CB_CMASK";
block            4869 drivers/gpu/drm/radeon/si.c 			block = "CB_IMMED";
block            4879 drivers/gpu/drm/radeon/si.c 			block = "DB";
block            4889 drivers/gpu/drm/radeon/si.c 			block = "DB_HTILE";
block            4899 drivers/gpu/drm/radeon/si.c 			block = "DB_STEN";
block            4913 drivers/gpu/drm/radeon/si.c 			block = "TC";
block            4917 drivers/gpu/drm/radeon/si.c 			block = "CP";
block            4923 drivers/gpu/drm/radeon/si.c 			block = "SH";
block            4927 drivers/gpu/drm/radeon/si.c 			block = "VGT";
block            4930 drivers/gpu/drm/radeon/si.c 			block = "IH";
block            4934 drivers/gpu/drm/radeon/si.c 			block = "RLC";
block            4938 drivers/gpu/drm/radeon/si.c 			block = "DMA0";
block            4941 drivers/gpu/drm/radeon/si.c 			block = "DMA1";
block            4945 drivers/gpu/drm/radeon/si.c 			block = "HDP";
block            4948 drivers/gpu/drm/radeon/si.c 			block = "unknown";
block            4961 drivers/gpu/drm/radeon/si.c 			block = "CB";
block            4971 drivers/gpu/drm/radeon/si.c 			block = "CB_FMASK";
block            4981 drivers/gpu/drm/radeon/si.c 			block = "CB_CMASK";
block            4991 drivers/gpu/drm/radeon/si.c 			block = "CB_IMMED";
block            5001 drivers/gpu/drm/radeon/si.c 			block = "DB";
block            5011 drivers/gpu/drm/radeon/si.c 			block = "DB_HTILE";
block            5021 drivers/gpu/drm/radeon/si.c 			block = "DB_STEN";
block            5031 drivers/gpu/drm/radeon/si.c 			block = "TC";
block            5035 drivers/gpu/drm/radeon/si.c 			block = "CP";
block            5041 drivers/gpu/drm/radeon/si.c 			block = "SH";
block            5044 drivers/gpu/drm/radeon/si.c 			block = "VGT";
block            5047 drivers/gpu/drm/radeon/si.c 			block = "IH";
block            5051 drivers/gpu/drm/radeon/si.c 			block = "RLC";
block            5055 drivers/gpu/drm/radeon/si.c 			block = "DMA0";
block            5058 drivers/gpu/drm/radeon/si.c 			block = "DMA1";
block            5062 drivers/gpu/drm/radeon/si.c 			block = "HDP";
block            5065 drivers/gpu/drm/radeon/si.c 			block = "unknown";
block            5073 drivers/gpu/drm/radeon/si.c 	       block, mc_id);
block            5618 drivers/gpu/drm/radeon/si.c 			 u32 block, bool enable)
block            5620 drivers/gpu/drm/radeon/si.c 	if (block & RADEON_CG_BLOCK_GFX) {
block            5633 drivers/gpu/drm/radeon/si.c 	if (block & RADEON_CG_BLOCK_MC) {
block            5638 drivers/gpu/drm/radeon/si.c 	if (block & RADEON_CG_BLOCK_SDMA) {
block            5642 drivers/gpu/drm/radeon/si.c 	if (block & RADEON_CG_BLOCK_BIF) {
block            5646 drivers/gpu/drm/radeon/si.c 	if (block & RADEON_CG_BLOCK_UVD) {
block            5652 drivers/gpu/drm/radeon/si.c 	if (block & RADEON_CG_BLOCK_HDP) {
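
In cik.c and si.c above, block is not an index but a bitmask of RADEON_CG_BLOCK_* flags, so a single update call can toggle clock gating for several IP blocks at once. Below is a self-contained model of that dispatch shape; the flag values and the update_cg() body are invented, and only the if-chain structure mirrors the driver code that starts at cik.c:6294 and si.c:5618 above.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Invented flag values; the real RADEON_CG_BLOCK_* constants live in
	 * radeon.h. Only the bitmask-dispatch shape matters here. */
	#define CG_BLOCK_GFX  (1u << 0)
	#define CG_BLOCK_MC   (1u << 1)
	#define CG_BLOCK_SDMA (1u << 2)
	#define CG_BLOCK_UVD  (1u << 3)
	#define CG_BLOCK_HDP  (1u << 4)

	static void update_cg(uint32_t block, bool enable)
	{
		/* Each set bit selects one IP block. */
		if (block & CG_BLOCK_GFX)
			printf("GFX  clock gating %s\n", enable ? "on" : "off");
		if (block & CG_BLOCK_MC)
			printf("MC   clock gating %s\n", enable ? "on" : "off");
		if (block & CG_BLOCK_SDMA)
			printf("SDMA clock gating %s\n", enable ? "on" : "off");
		if (block & CG_BLOCK_HDP)
			printf("HDP  clock gating %s\n", enable ? "on" : "off");
	}

	int main(void)
	{
		update_cg(CG_BLOCK_GFX | CG_BLOCK_MC | CG_BLOCK_HDP, true);
		update_cg(CG_BLOCK_UVD, false);	/* no handler above: quietly ignored */
		return 0;
	}
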
block             435 drivers/gpu/drm/rockchip/cdn-dp-reg.c 			  unsigned int block, size_t length)
block             442 drivers/gpu/drm/rockchip/cdn-dp-reg.c 		msg[0] = block / 2;
block             443 drivers/gpu/drm/rockchip/cdn-dp-reg.c 		msg[1] = block % 2;
block             464 drivers/gpu/drm/rockchip/cdn-dp-reg.c 		if (reg[0] == length && reg[1] == block / 2)
block             469 drivers/gpu/drm/rockchip/cdn-dp-reg.c 		DRM_DEV_ERROR(dp->dev, "get block[%d] edid failed: %d\n", block,
block             467 drivers/gpu/drm/rockchip/cdn-dp-reg.h 			  unsigned int block, size_t length);
block             265 drivers/gpu/drm/tiny/gm12u320.c 	int block, dst_offset, len, remain, ret, x1, x2, y1, y2;
block             305 drivers/gpu/drm/tiny/gm12u320.c 		block = dst_offset / DATA_BLOCK_CONTENT_SIZE;
block             317 drivers/gpu/drm/tiny/gm12u320.c 			gm12u320->data_buf[block] + dst_offset,
block             321 drivers/gpu/drm/tiny/gm12u320.c 			block++;
block             324 drivers/gpu/drm/tiny/gm12u320.c 				gm12u320->data_buf[block] + dst_offset,
block             350 drivers/gpu/drm/tiny/gm12u320.c 	int block, block_size, len;
block             357 drivers/gpu/drm/tiny/gm12u320.c 		for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
block             358 drivers/gpu/drm/tiny/gm12u320.c 			if (block == GM12U320_BLOCK_COUNT - 1)
block             367 drivers/gpu/drm/tiny/gm12u320.c 			gm12u320->cmd_buf[20] = 0xfc - block * 4;
block             368 drivers/gpu/drm/tiny/gm12u320.c 			gm12u320->cmd_buf[21] = block | (frame << 7);
block             380 drivers/gpu/drm/tiny/gm12u320.c 				gm12u320->data_buf[block], block_size,
block              16 drivers/gpu/drm/udl/udl_connector.c static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
block              28 drivers/gpu/drm/udl/udl_connector.c 		int bval = (i + block * EDID_LENGTH) << 8;
block             608 drivers/gpu/drm/virtio/virtgpu_vq.c 				 unsigned int block, size_t len)
block             611 drivers/gpu/drm/virtio/virtgpu_vq.c 	size_t start = block * EDID_LENGTH;
block              88 drivers/gpu/drm/vmwgfx/ttm_lock.c 	bool block = true;
block              95 drivers/gpu/drm/vmwgfx/ttm_lock.c 		block = false;
block              98 drivers/gpu/drm/vmwgfx/ttm_lock.c 		block = false;
block             102 drivers/gpu/drm/vmwgfx/ttm_lock.c 	return !block;
block             693 drivers/hid/hid-cp2112.c 			read_length = data->block[0];
block             698 drivers/hid/hid-cp2112.c 						 data->block + 1,
block             699 drivers/hid/hid-cp2112.c 						 data->block[0]);
block             709 drivers/hid/hid-cp2112.c 						 data->block,
block             710 drivers/hid/hid-cp2112.c 						 data->block[0] + 1);
block             718 drivers/hid/hid-cp2112.c 					      command, data->block,
block             719 drivers/hid/hid-cp2112.c 					      data->block[0] + 1);
block             790 drivers/hid/hid-cp2112.c 		memcpy(data->block + 1, buf, read_length);
block             798 drivers/hid/hid-cp2112.c 		memcpy(data->block, buf, read_length);
block            1026 drivers/hwmon/occ/common.c 	struct occ_sensor_data_block *block = &poll->block;
block            1032 drivers/hwmon/occ/common.c 		block = (struct occ_sensor_data_block *)((u8 *)block + offset);
block            1034 drivers/hwmon/occ/common.c 		offset = (block->header.num_sensors *
block            1035 drivers/hwmon/occ/common.c 			  block->header.sensor_length) + sizeof(block->header);
block            1045 drivers/hwmon/occ/common.c 			old_offset, offset - 1, block->header.eye_catcher,
block            1046 drivers/hwmon/occ/common.c 			block->header.num_sensors);
block            1049 drivers/hwmon/occ/common.c 		if (strncmp(block->header.eye_catcher, "TEMP", 4) == 0)
block            1051 drivers/hwmon/occ/common.c 		else if (strncmp(block->header.eye_catcher, "FREQ", 4) == 0)
block            1053 drivers/hwmon/occ/common.c 		else if (strncmp(block->header.eye_catcher, "POWR", 4) == 0)
block            1055 drivers/hwmon/occ/common.c 		else if (strncmp(block->header.eye_catcher, "CAPS", 4) == 0)
block            1057 drivers/hwmon/occ/common.c 		else if (strncmp(block->header.eye_catcher, "EXTN", 4) == 0)
block            1061 drivers/hwmon/occ/common.c 				 block->header.eye_catcher);
block            1065 drivers/hwmon/occ/common.c 		sensor->num_sensors = block->header.num_sensors;
block            1066 drivers/hwmon/occ/common.c 		sensor->version = block->header.sensor_format;
block            1067 drivers/hwmon/occ/common.c 		sensor->data = &block->data;
block              61 drivers/hwmon/occ/common.h 	struct occ_sensor_data_block block;
block              56 drivers/hwtracing/intel_th/msu-sink.c 	void *block;
block              71 drivers/hwtracing/intel_th/msu-sink.c 		block = dma_alloc_coherent(priv->dev->parent->parent,
block              74 drivers/hwtracing/intel_th/msu-sink.c 		sg_set_buf(sg_ptr, block, PAGE_SIZE);
block              99 drivers/hwtracing/intel_th/msu.c 	struct scatterlist	*block;
block             433 drivers/hwtracing/intel_th/msu.c 	return sg_virt(iter->block);
block             482 drivers/hwtracing/intel_th/msu.c 	iter->block = iter->start_block;
block             532 drivers/hwtracing/intel_th/msu.c 	if (iter->wrap_count && iter->block == iter->start_block) {
block             545 drivers/hwtracing/intel_th/msu.c 	if (sg_is_last(iter->block))
block             546 drivers/hwtracing/intel_th/msu.c 		iter->block = msc_win_base_sg(iter->win);
block             548 drivers/hwtracing/intel_th/msu.c 		iter->block = sg_next(iter->block);
block             551 drivers/hwtracing/intel_th/msu.c 	if (!iter->wrap_count && iter->block == iter->start_block)
block             608 drivers/hwtracing/intel_th/msu.c 		if (iter->block == iter->start_block && iter->wrap_count == 2) {
block             993 drivers/hwtracing/intel_th/msu.c 	void *block;
block            1001 drivers/hwtracing/intel_th/msu.c 		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
block            1004 drivers/hwtracing/intel_th/msu.c 		if (!block)
block            1007 drivers/hwtracing/intel_th/msu.c 		sg_set_buf(sg_ptr, block, PAGE_SIZE);
block             406 drivers/i2c/busses/i2c-ali1535.c 			len = data->block[0];
block             409 drivers/i2c/busses/i2c-ali1535.c 				data->block[0] = len;
block             413 drivers/i2c/busses/i2c-ali1535.c 				data->block[0] = len;
block             419 drivers/i2c/busses/i2c-ali1535.c 				outb_p(data->block[i], SMBBLKDAT);
block             451 drivers/i2c/busses/i2c-ali1535.c 		data->block[0] = len;
block             454 drivers/i2c/busses/i2c-ali1535.c 		for (i = 1; i <= data->block[0]; i++) {
block             455 drivers/i2c/busses/i2c-ali1535.c 			data->block[i] = inb_p(SMBBLKDAT);
block             457 drivers/i2c/busses/i2c-ali1535.c 				len, i, data->block[i]);
block             196 drivers/i2c/busses/i2c-ali1563.c 		len = data->block[0];
block             202 drivers/i2c/busses/i2c-ali1563.c 		outb_p(data->block[1], SMB_BLK_DAT);
block             210 drivers/i2c/busses/i2c-ali1563.c 			outb_p(data->block[i + 1], SMB_BLK_DAT);
block             225 drivers/i2c/busses/i2c-ali1563.c 			data->block[i+1] = inb_p(SMB_BLK_DAT);
block             390 drivers/i2c/busses/i2c-ali15x3.c 			len = data->block[0];
block             393 drivers/i2c/busses/i2c-ali15x3.c 				data->block[0] = len;
block             397 drivers/i2c/busses/i2c-ali15x3.c 				data->block[0] = len;
block             403 drivers/i2c/busses/i2c-ali15x3.c 				outb_p(data->block[i], SMBBLKDAT);
block             436 drivers/i2c/busses/i2c-ali15x3.c 		data->block[0] = len;
block             439 drivers/i2c/busses/i2c-ali15x3.c 		for (i = 1; i <= data->block[0]; i++) {
block             440 drivers/i2c/busses/i2c-ali15x3.c 			data->block[i] = inb_p(SMBBLKDAT);
block             442 drivers/i2c/busses/i2c-ali15x3.c 				len, i, data->block[i]);
block             222 drivers/i2c/busses/i2c-amd756.c 			len = data->block[0];
block             230 drivers/i2c/busses/i2c-amd756.c 				outb_p(data->block[i],
block             262 drivers/i2c/busses/i2c-amd756.c 		data->block[0] = inw_p(SMB_HOST_DATA) & 0x3f;
block             263 drivers/i2c/busses/i2c-amd756.c 		if(data->block[0] > 32)
block             264 drivers/i2c/busses/i2c-amd756.c 			data->block[0] = 32;
block             266 drivers/i2c/busses/i2c-amd756.c 		for (i = 1; i <= data->block[0]; i++)
block             267 drivers/i2c/busses/i2c-amd756.c 			data->block[i] = inb_p(SMB_HOST_BLOCK_DATA);
block             252 drivers/i2c/busses/i2c-amd8111.c 				len = min_t(u8, data->block[0],
block             260 drivers/i2c/busses/i2c-amd8111.c 						       data->block[i + 1]);
block             269 drivers/i2c/busses/i2c-amd8111.c 			len = min_t(u8, data->block[0],
block             281 drivers/i2c/busses/i2c-amd8111.c 						       data->block[i + 1]);
block             305 drivers/i2c/busses/i2c-amd8111.c 			len = min_t(u8, data->block[0],
block             315 drivers/i2c/busses/i2c-amd8111.c 						      data->block[i + 1]);
block             388 drivers/i2c/busses/i2c-amd8111.c 						     data->block + i + 1);
block             392 drivers/i2c/busses/i2c-amd8111.c 			data->block[0] = len;
block             297 drivers/i2c/busses/i2c-highlander.c 		dev->buf = &data->block[1];
block             298 drivers/i2c/busses/i2c-highlander.c 		dev->buf_len = data->block[0];
block             537 drivers/i2c/busses/i2c-i801.c 		len = data->block[0];
block             540 drivers/i2c/busses/i2c-i801.c 			outb_p(data->block[i+1], SMBBLKDAT(priv));
block             553 drivers/i2c/busses/i2c-i801.c 		data->block[0] = len;
block             555 drivers/i2c/busses/i2c-i801.c 			data->block[i + 1] = inb_p(SMBBLKDAT(priv));
block             693 drivers/i2c/busses/i2c-i801.c 	len = data->block[0];
block             697 drivers/i2c/busses/i2c-i801.c 		outb_p(data->block[1], SMBBLKDAT(priv));
block             713 drivers/i2c/busses/i2c-i801.c 		priv->data = &data->block[1];
block             756 drivers/i2c/busses/i2c-i801.c 			data->block[0] = len;
block             761 drivers/i2c/busses/i2c-i801.c 			data->block[i] = inb_p(SMBBLKDAT(priv));
block             763 drivers/i2c/busses/i2c-i801.c 			outb_p(data->block[i+1], SMBBLKDAT(priv));
block             805 drivers/i2c/busses/i2c-i801.c 		if (data->block[0] < 1)
block             806 drivers/i2c/busses/i2c-i801.c 			data->block[0] = 1;
block             807 drivers/i2c/busses/i2c-i801.c 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
block             808 drivers/i2c/busses/i2c-i801.c 			data->block[0] = I2C_SMBUS_BLOCK_MAX;
block             810 drivers/i2c/busses/i2c-i801.c 		data->block[0] = 32;	/* max for SMBus block reads */
block             841 drivers/i2c/busses/i2c-i801.c 	int block = 0;
block             892 drivers/i2c/busses/i2c-i801.c 		block = 1;
block             911 drivers/i2c/busses/i2c-i801.c 		block = 1;
block             920 drivers/i2c/busses/i2c-i801.c 		block = 1;
block             935 drivers/i2c/busses/i2c-i801.c 	if (block)
block             944 drivers/i2c/busses/i2c-i801.c 	if (hwpec || block)
block             948 drivers/i2c/busses/i2c-i801.c 	if (block)
block             196 drivers/i2c/busses/i2c-isch.c 			len = data->block[0];
block             201 drivers/i2c/busses/i2c-isch.c 				outb(data->block[i], SMBBLKDAT+i-1);
block             228 drivers/i2c/busses/i2c-isch.c 		data->block[0] = inb(SMBHSTDAT0);
block             229 drivers/i2c/busses/i2c-isch.c 		if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
block             231 drivers/i2c/busses/i2c-isch.c 		for (i = 1; i <= data->block[0]; i++)
block             232 drivers/i2c/busses/i2c-isch.c 			data->block[i] = inb(SMBBLKDAT+i-1);
block             350 drivers/i2c/busses/i2c-ismt.c 			memcpy(data->block, dma_buffer, desc->rxbytes);
block             353 drivers/i2c/busses/i2c-ismt.c 			memcpy(&data->block[1], dma_buffer, desc->rxbytes);
block             354 drivers/i2c/busses/i2c-ismt.c 			data->block[0] = desc->rxbytes;
block             503 drivers/i2c/busses/i2c-ismt.c 			dma_size = data->block[0] + 1;
block             508 drivers/i2c/busses/i2c-ismt.c 			memcpy(&dma_buffer[1], &data->block[1], dma_size - 1);
block             522 drivers/i2c/busses/i2c-ismt.c 		if (data->block[0] < 1)
block             523 drivers/i2c/busses/i2c-ismt.c 			data->block[0] = 1;
block             525 drivers/i2c/busses/i2c-ismt.c 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
block             526 drivers/i2c/busses/i2c-ismt.c 			data->block[0] = I2C_SMBUS_BLOCK_MAX;
block             531 drivers/i2c/busses/i2c-ismt.c 			dma_size = data->block[0] + 1;
block             536 drivers/i2c/busses/i2c-ismt.c 			memcpy(&dma_buffer[1], &data->block[1], dma_size - 1);
block             540 drivers/i2c/busses/i2c-ismt.c 			dma_size = data->block[0];
block             133 drivers/i2c/busses/i2c-mv64xxx.c 	u32			block;
block             407 drivers/i2c/busses/i2c-mv64xxx.c 		drv_data->block = 0;
block             426 drivers/i2c/busses/i2c-mv64xxx.c 		drv_data->block = 0;
block             489 drivers/i2c/busses/i2c-mv64xxx.c 	drv_data->block = 0;
block             541 drivers/i2c/busses/i2c-mv64xxx.c 		!drv_data->block, drv_data->adapter.timeout);
block             552 drivers/i2c/busses/i2c-mv64xxx.c 	if (abort && drv_data->block) {
block             557 drivers/i2c/busses/i2c-mv64xxx.c 			!drv_data->block, drv_data->adapter.timeout);
block             559 drivers/i2c/busses/i2c-mv64xxx.c 		if ((time_left <= 0) && drv_data->block) {
block             563 drivers/i2c/busses/i2c-mv64xxx.c 				"time_left: %d\n", drv_data->block,
block             582 drivers/i2c/busses/i2c-mv64xxx.c 	drv_data->block = 1;
block             652 drivers/i2c/busses/i2c-mv64xxx.c 	drv_data->block = 1;
block             222 drivers/i2c/busses/i2c-nforce2.c 			len = data->block[0];
block             231 drivers/i2c/busses/i2c-nforce2.c 				outb_p(data->block[i + 1],
block             272 drivers/i2c/busses/i2c-nforce2.c 			data->block[i + 1] = inb_p(NVIDIA_SMB_DATA + i);
block             273 drivers/i2c/busses/i2c-nforce2.c 		data->block[0] = len;
block             154 drivers/i2c/busses/i2c-opal.c 		req.buffer_ra = cpu_to_be64(__pa(&data->block[1]));
block             155 drivers/i2c/busses/i2c-opal.c 		req.size = cpu_to_be32(data->block[0]);
block             226 drivers/i2c/busses/i2c-pasemi.c 			len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX);
block             229 drivers/i2c/busses/i2c-pasemi.c 				TXFIFO_WR(smbus, data->block[i]);
block             230 drivers/i2c/busses/i2c-pasemi.c 			TXFIFO_WR(smbus, data->block[len] | MTXFIFO_STOP);
block             243 drivers/i2c/busses/i2c-pasemi.c 		len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX - 1);
block             249 drivers/i2c/busses/i2c-pasemi.c 			TXFIFO_WR(smbus, data->block[i]);
block             297 drivers/i2c/busses/i2c-pasemi.c 		data->block[0] = len;
block             304 drivers/i2c/busses/i2c-pasemi.c 			data->block[i] = rd & MRXFIFO_DATA_M;
block             559 drivers/i2c/busses/i2c-piix4.c 			len = data->block[0];
block             565 drivers/i2c/busses/i2c-piix4.c 				outb_p(data->block[i], SMBBLKDAT);
block             593 drivers/i2c/busses/i2c-piix4.c 		data->block[0] = inb_p(SMBHSTDAT0);
block             594 drivers/i2c/busses/i2c-piix4.c 		if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
block             597 drivers/i2c/busses/i2c-piix4.c 		for (i = 1; i <= data->block[0]; i++)
block             598 drivers/i2c/busses/i2c-piix4.c 			data->block[i] = inb_p(SMBBLKDAT);
block              86 drivers/i2c/busses/i2c-powermac.c 		buf = data->block;
block              87 drivers/i2c/busses/i2c-powermac.c 		len = data->block[0] + 1;
block              90 drivers/i2c/busses/i2c-powermac.c 		buf = &data->block[1];
block              91 drivers/i2c/busses/i2c-powermac.c 		len = data->block[0];
block             146 drivers/i2c/busses/i2c-scmi.c 			len = data->block[0];
block             153 drivers/i2c/busses/i2c-scmi.c 			mt_params[4].buffer.pointer = data->block + 1;
block             260 drivers/i2c/busses/i2c-scmi.c 		data->block[0] = len;
block             261 drivers/i2c/busses/i2c-scmi.c 		memcpy(data->block + 1, obj->buffer.pointer, len);
block             236 drivers/i2c/busses/i2c-sis630.c 		len = data->block[0];
block             244 drivers/i2c/busses/i2c-sis630.c 				"set data 0x%02x\n", data->block[i]);
block             246 drivers/i2c/busses/i2c-sis630.c 			sis630_write(SMB_BYTE + (i - 1) % 8, data->block[i]);
block             280 drivers/i2c/busses/i2c-sis630.c 		data->block[0] = len = 0;
block             293 drivers/i2c/busses/i2c-sis630.c 				data->block[0] = sis630_read(SMB_COUNT);
block             296 drivers/i2c/busses/i2c-sis630.c 			if (data->block[0] > 32)
block             297 drivers/i2c/busses/i2c-sis630.c 				data->block[0] = 32;
block             300 drivers/i2c/busses/i2c-sis630.c 				"block data read len=0x%x\n", data->block[0]);
block             302 drivers/i2c/busses/i2c-sis630.c 			for (i = 0; i < 8 && len < data->block[0]; i++, len++) {
block             305 drivers/i2c/busses/i2c-sis630.c 				data->block[len + 1] = sis630_read(SMB_BYTE +
block             314 drivers/i2c/busses/i2c-sis630.c 		} while (len < data->block[0]);
block             923 drivers/i2c/busses/i2c-stm32f7.c 			if (data->block[0] > I2C_SMBUS_BLOCK_MAX ||
block             924 drivers/i2c/busses/i2c-stm32f7.c 			    !data->block[0]) {
block             926 drivers/i2c/busses/i2c-stm32f7.c 					data->block[0]);
block             929 drivers/i2c/busses/i2c-stm32f7.c 			f7_msg->count = data->block[0] + 2;
block             931 drivers/i2c/busses/i2c-stm32f7.c 				f7_msg->smbus_buf[i] = data->block[i - 1];
block             944 drivers/i2c/busses/i2c-stm32f7.c 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX - 1) {
block             946 drivers/i2c/busses/i2c-stm32f7.c 				data->block[0]);
block             949 drivers/i2c/busses/i2c-stm32f7.c 		f7_msg->count = data->block[0] + 2;
block             951 drivers/i2c/busses/i2c-stm32f7.c 			f7_msg->smbus_buf[i] = data->block[i - 1];
block            1660 drivers/i2c/busses/i2c-stm32f7.c 			data->block[i] = f7_msg->smbus_buf[i];
block             230 drivers/i2c/busses/i2c-viapro.c 			outb_p(data->block[0], SMBHSTDAT0);
block             235 drivers/i2c/busses/i2c-viapro.c 			u8 len = data->block[0];
block             241 drivers/i2c/busses/i2c-viapro.c 				outb_p(data->block[i], SMBBLKDAT);
block             273 drivers/i2c/busses/i2c-viapro.c 		data->block[0] = inb_p(SMBHSTDAT0);
block             274 drivers/i2c/busses/i2c-viapro.c 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
block             275 drivers/i2c/busses/i2c-viapro.c 			data->block[0] = I2C_SMBUS_BLOCK_MAX;
block             277 drivers/i2c/busses/i2c-viapro.c 		for (i = 1; i <= data->block[0]; i++)
block             278 drivers/i2c/busses/i2c-viapro.c 			data->block[i] = inb_p(SMBBLKDAT);
block             391 drivers/i2c/busses/i2c-xgene-slimpro.c 						&data->block[0]);
block             397 drivers/i2c/busses/i2c-xgene-slimpro.c 						data->block[0] + 1,
block             398 drivers/i2c/busses/i2c-xgene-slimpro.c 						&data->block[0]);
block             409 drivers/i2c/busses/i2c-xgene-slimpro.c 						&data->block[1]);
block             414 drivers/i2c/busses/i2c-xgene-slimpro.c 						data->block[0],
block             415 drivers/i2c/busses/i2c-xgene-slimpro.c 						&data->block[1]);
block             302 drivers/i2c/busses/scx200_acb.c 		len = data->block[0];
block             305 drivers/i2c/busses/scx200_acb.c 		buffer = &data->block[1];
block            2117 drivers/i2c/i2c-core-base.c 	raw_id.block[0] = 3;
block            2124 drivers/i2c/i2c-core-base.c 	id->manufacturer_id = (raw_id.block[1] << 4) | (raw_id.block[2] >> 4);
block            2125 drivers/i2c/i2c-core-base.c 	id->part_id = ((raw_id.block[2] & 0xf) << 5) | (raw_id.block[3] >> 3);
block            2126 drivers/i2c/i2c-core-base.c 	id->die_revision = raw_id.block[3] & 0x7;
block             227 drivers/i2c/i2c-core-smbus.c 	memcpy(values, &data.block[1], data.block[0]);
block             228 drivers/i2c/i2c-core-smbus.c 	return data.block[0];
block             249 drivers/i2c/i2c-core-smbus.c 	data.block[0] = length;
block             250 drivers/i2c/i2c-core-smbus.c 	memcpy(&data.block[1], values, length);
block             266 drivers/i2c/i2c-core-smbus.c 	data.block[0] = length;
block             273 drivers/i2c/i2c-core-smbus.c 	memcpy(values, &data.block[1], data.block[0]);
block             274 drivers/i2c/i2c-core-smbus.c 	return data.block[0];
block             285 drivers/i2c/i2c-core-smbus.c 	data.block[0] = length;
block             286 drivers/i2c/i2c-core-smbus.c 	memcpy(data.block + 1, values, length);
block             392 drivers/i2c/i2c-core-smbus.c 			msg[0].len = data->block[0] + 2;
block             396 drivers/i2c/i2c-core-smbus.c 					data->block[0]);
block             402 drivers/i2c/i2c-core-smbus.c 				msg[0].buf[i] = data->block[i - 1];
block             408 drivers/i2c/i2c-core-smbus.c 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
block             411 drivers/i2c/i2c-core-smbus.c 				data->block[0]);
block             415 drivers/i2c/i2c-core-smbus.c 		msg[0].len = data->block[0] + 2;
block             418 drivers/i2c/i2c-core-smbus.c 			msg[0].buf[i] = data->block[i - 1];
block             426 drivers/i2c/i2c-core-smbus.c 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
block             429 drivers/i2c/i2c-core-smbus.c 				data->block[0]);
block             434 drivers/i2c/i2c-core-smbus.c 			msg[1].len = data->block[0];
block             437 drivers/i2c/i2c-core-smbus.c 			msg[0].len = data->block[0] + 1;
block             440 drivers/i2c/i2c-core-smbus.c 			for (i = 1; i <= data->block[0]; i++)
block             441 drivers/i2c/i2c-core-smbus.c 				msg[0].buf[i] = data->block[i];
block             493 drivers/i2c/i2c-core-smbus.c 			for (i = 0; i < data->block[0]; i++)
block             494 drivers/i2c/i2c-core-smbus.c 				data->block[i + 1] = msg[1].buf[i];
block             499 drivers/i2c/i2c-core-smbus.c 				data->block[i] = msg[1].buf[i];
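
Every SMBus path above relies on the same layout of union i2c_smbus_data: block[0] is the byte count, clamped to I2C_SMBUS_BLOCK_MAX (32), and block[1..block[0]] is the payload, which is why the i2c-core-smbus.c emulation copies block[0] + 1 or block[0] + 2 bytes depending on whether a command byte travels with it. Below is a self-contained model of that layout; struct smbus_block and the pack/unpack helpers are illustrative stand-ins, not kernel API.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define SMBUS_BLOCK_MAX 32	/* mirrors I2C_SMBUS_BLOCK_MAX */

	/* Modeled after union i2c_smbus_data: block[0] = length, then payload. */
	struct smbus_block {
		uint8_t block[SMBUS_BLOCK_MAX + 2];
	};

	/* Pack a buffer for a block write, clamping as the drivers above do. */
	static uint8_t block_pack(struct smbus_block *d, const uint8_t *src, size_t len)
	{
		if (len > SMBUS_BLOCK_MAX)
			len = SMBUS_BLOCK_MAX;
		d->block[0] = (uint8_t)len;
		memcpy(&d->block[1], src, len);
		return d->block[0];
	}

	/* Unpack after a block read: trust block[0], but never past the array. */
	static uint8_t block_unpack(const struct smbus_block *d, uint8_t *dst)
	{
		uint8_t len = d->block[0];

		if (len > SMBUS_BLOCK_MAX)
			len = SMBUS_BLOCK_MAX;
		memcpy(dst, &d->block[1], len);
		return len;
	}

	int main(void)
	{
		struct smbus_block d;
		uint8_t out[SMBUS_BLOCK_MAX];
		uint8_t msg[] = { 0xde, 0xad, 0xbe, 0xef };

		block_pack(&d, msg, sizeof(msg));
		printf("stored %u bytes, first 0x%02x\n", block_unpack(&d, out), out[0]);
		return 0;
	}
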
block             365 drivers/i2c/i2c-dev.c 		datasize = sizeof(data->block);
block             379 drivers/i2c/i2c-dev.c 			temp.block[0] = I2C_SMBUS_BLOCK_MAX;
block              67 drivers/i2c/i2c-stub.c 	u8 block[I2C_SMBUS_BLOCK_MAX];
block             218 drivers/i2c/i2c-stub.c 		if (data->block[0] > 256 - command)	/* Avoid overrun */
block             219 drivers/i2c/i2c-stub.c 			data->block[0] = 256 - command;
block             220 drivers/i2c/i2c-stub.c 		len = data->block[0];
block             224 drivers/i2c/i2c-stub.c 				chip->words[command + i] |= data->block[1 + i];
block             231 drivers/i2c/i2c-stub.c 				data->block[1 + i] =
block             249 drivers/i2c/i2c-stub.c 			len = data->block[0];
block             266 drivers/i2c/i2c-stub.c 				b->block[i] = data->block[i + 1];
block             268 drivers/i2c/i2c-stub.c 			chip->words[command] = (b->block[0] << 8) | b->len;
block             280 drivers/i2c/i2c-stub.c 			data->block[0] = len;
block             282 drivers/i2c/i2c-stub.c 				data->block[i + 1] = b->block[i];
block             534 drivers/ide/ide-cd.c 	long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
block             546 drivers/ide/ide-cd.c 	req->cmd[2] = (block >> 24) & 0xff;
block             547 drivers/ide/ide-cd.c 	req->cmd[3] = (block >> 16) & 0xff;
block             548 drivers/ide/ide-cd.c 	req->cmd[4] = (block >>  8) & 0xff;
block             549 drivers/ide/ide-cd.c 	req->cmd[5] = block & 0xff;
block             876 drivers/ide/ide-cd.c 					sector_t block)
block             883 drivers/ide/ide-cd.c 				  rq->cmd[0], (unsigned long long)block);
block              83 drivers/ide/ide-disk.c 					sector_t block)
block              94 drivers/ide/ide-disk.c 		if (block + blk_rq_sectors(rq) > 1ULL << 28)
block             107 drivers/ide/ide-disk.c 					(unsigned long long)block);
block             110 drivers/ide/ide-disk.c 			tf->lbal   = (u8) block;
block             111 drivers/ide/ide-disk.c 			tf->lbam   = (u8)(block >>  8);
block             112 drivers/ide/ide-disk.c 			tf->lbah   = (u8)(block >> 16);
block             117 drivers/ide/ide-disk.c 			tf->lbal  = (u8)(block >> 24);
block             118 drivers/ide/ide-disk.c 			if (sizeof(block) != 4) {
block             119 drivers/ide/ide-disk.c 				tf->lbam = (u8)((u64)block >> 32);
block             120 drivers/ide/ide-disk.c 				tf->lbah = (u8)((u64)block >> 40);
block             128 drivers/ide/ide-disk.c 			tf->lbal   = block;
block             129 drivers/ide/ide-disk.c 			tf->lbam   = block >>= 8;
block             130 drivers/ide/ide-disk.c 			tf->lbah   = block >>= 8;
block             131 drivers/ide/ide-disk.c 			tf->device = ((block >> 8) & 0xf) | ATA_LBA;
block             136 drivers/ide/ide-disk.c 		track = (int)block / drive->sect;
block             137 drivers/ide/ide-disk.c 		sect  = (int)block % drive->sect + 1;
block             183 drivers/ide/ide-disk.c 				      sector_t block)
block             194 drivers/ide/ide-disk.c 		 (unsigned long long)block, blk_rq_sectors(rq));
block             199 drivers/ide/ide-disk.c 	return __ide_do_rw_disk(drive, rq, block);
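
In ide-disk.c above, the sector number block is sliced byte-wise into taskfile registers: for LBA28, bits 0..23 go into lbal/lbam/lbah and bits 24..27 into the device register together with ATA_LBA, while the LBA48 branch pushes bits 24..47 into the high-order register set. Here is a self-contained sketch of the LBA28 split; struct taskfile28 and encode_lba28() are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	#define ATA_LBA 0x40	/* LBA-addressing bit in the device register */

	struct taskfile28 {
		uint8_t lbal, lbam, lbah, device;
	};

	static struct taskfile28 encode_lba28(uint32_t block)
	{
		struct taskfile28 tf;

		tf.lbal   = (uint8_t)block;			/* bits  0..7  */
		tf.lbam   = (uint8_t)(block >> 8);		/* bits  8..15 */
		tf.lbah   = (uint8_t)(block >> 16);		/* bits 16..23 */
		tf.device = ((block >> 24) & 0xf) | ATA_LBA;	/* bits 24..27 */
		return tf;
	}

	int main(void)
	{
		struct taskfile28 tf = encode_lba28(0x0abcdef1);

		printf("lbal=%02x lbam=%02x lbah=%02x device=%02x\n",
		       tf.lbal, tf.lbam, tf.lbah, tf.device);
		return 0;
	}
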
block             196 drivers/ide/ide-floppy.c 	int block = sector / floppy->bs_factor;
block             200 drivers/ide/ide-floppy.c 	ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
block             205 drivers/ide/ide-floppy.c 	put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
block             230 drivers/ide/ide-floppy.c 					     struct request *rq, sector_t block)
block             267 drivers/ide/ide-floppy.c 		idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
block             567 drivers/ide/ide-tape.c 					  struct request *rq, sector_t block)
block             769 drivers/ide/ide-tape.c 		unsigned int block, u8 partition, int skip)
block             774 drivers/ide/ide-tape.c 	put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
block             802 drivers/ide/ide-tape.c static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
block             813 drivers/ide/ide-tape.c 	idetape_create_locate_cmd(drive, &pc, block, partition, skip);
block              96 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block = container_of(kref,
block              99 drivers/iio/buffer/industrialio-buffer-dma.c 	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
block             101 drivers/iio/buffer/industrialio-buffer-dma.c 	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
block             102 drivers/iio/buffer/industrialio-buffer-dma.c 					block->vaddr, block->phys_addr);
block             104 drivers/iio/buffer/industrialio-buffer-dma.c 	iio_buffer_put(&block->queue->buffer);
block             105 drivers/iio/buffer/industrialio-buffer-dma.c 	kfree(block);
block             108 drivers/iio/buffer/industrialio-buffer-dma.c static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
block             110 drivers/iio/buffer/industrialio-buffer-dma.c 	kref_get(&block->kref);
block             113 drivers/iio/buffer/industrialio-buffer-dma.c static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
block             115 drivers/iio/buffer/industrialio-buffer-dma.c 	kref_put(&block->kref, iio_buffer_block_release);
block             127 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block, *_block;
block             134 drivers/iio/buffer/industrialio-buffer-dma.c 	list_for_each_entry_safe(block, _block, &block_list, head)
block             135 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_buffer_block_release(&block->kref);
block             141 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block;
block             144 drivers/iio/buffer/industrialio-buffer-dma.c 	block = container_of(kref, struct iio_dma_buffer_block, kref);
block             147 drivers/iio/buffer/industrialio-buffer-dma.c 	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
block             156 drivers/iio/buffer/industrialio-buffer-dma.c static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
block             158 drivers/iio/buffer/industrialio-buffer-dma.c 	kref_put(&block->kref, iio_buffer_block_release_atomic);
block             169 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block;
block             171 drivers/iio/buffer/industrialio-buffer-dma.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block             172 drivers/iio/buffer/industrialio-buffer-dma.c 	if (!block)
block             175 drivers/iio/buffer/industrialio-buffer-dma.c 	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
block             176 drivers/iio/buffer/industrialio-buffer-dma.c 		&block->phys_addr, GFP_KERNEL);
block             177 drivers/iio/buffer/industrialio-buffer-dma.c 	if (!block->vaddr) {
block             178 drivers/iio/buffer/industrialio-buffer-dma.c 		kfree(block);
block             182 drivers/iio/buffer/industrialio-buffer-dma.c 	block->size = size;
block             183 drivers/iio/buffer/industrialio-buffer-dma.c 	block->state = IIO_BLOCK_STATE_DEQUEUED;
block             184 drivers/iio/buffer/industrialio-buffer-dma.c 	block->queue = queue;
block             185 drivers/iio/buffer/industrialio-buffer-dma.c 	INIT_LIST_HEAD(&block->head);
block             186 drivers/iio/buffer/industrialio-buffer-dma.c 	kref_init(&block->kref);
block             190 drivers/iio/buffer/industrialio-buffer-dma.c 	return block;
block             193 drivers/iio/buffer/industrialio-buffer-dma.c static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
block             195 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue = block->queue;
block             201 drivers/iio/buffer/industrialio-buffer-dma.c 	if (block->state != IIO_BLOCK_STATE_DEAD) {
block             202 drivers/iio/buffer/industrialio-buffer-dma.c 		block->state = IIO_BLOCK_STATE_DONE;
block             203 drivers/iio/buffer/industrialio-buffer-dma.c 		list_add_tail(&block->head, &queue->outgoing);
block             214 drivers/iio/buffer/industrialio-buffer-dma.c void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
block             216 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue = block->queue;
block             220 drivers/iio/buffer/industrialio-buffer-dma.c 	_iio_dma_buffer_block_done(block);
block             223 drivers/iio/buffer/industrialio-buffer-dma.c 	iio_buffer_block_put_atomic(block);
block             241 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block, *_block;
block             245 drivers/iio/buffer/industrialio-buffer-dma.c 	list_for_each_entry_safe(block, _block, list, head) {
block             246 drivers/iio/buffer/industrialio-buffer-dma.c 		list_del(&block->head);
block             247 drivers/iio/buffer/industrialio-buffer-dma.c 		block->bytes_used = 0;
block             248 drivers/iio/buffer/industrialio-buffer-dma.c 		_iio_dma_buffer_block_done(block);
block             249 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_buffer_block_put_atomic(block);
block             257 drivers/iio/buffer/industrialio-buffer-dma.c static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
block             264 drivers/iio/buffer/industrialio-buffer-dma.c 	switch (block->state) {
block             284 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block;
block             309 drivers/iio/buffer/industrialio-buffer-dma.c 		block = queue->fileio.blocks[i];
block             312 drivers/iio/buffer/industrialio-buffer-dma.c 		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
block             313 drivers/iio/buffer/industrialio-buffer-dma.c 			block->state = IIO_BLOCK_STATE_DEAD;
block             328 drivers/iio/buffer/industrialio-buffer-dma.c 			block = queue->fileio.blocks[i];
block             329 drivers/iio/buffer/industrialio-buffer-dma.c 			if (block->state == IIO_BLOCK_STATE_DEAD) {
block             331 drivers/iio/buffer/industrialio-buffer-dma.c 				iio_buffer_block_put(block);
block             332 drivers/iio/buffer/industrialio-buffer-dma.c 				block = NULL;
block             334 drivers/iio/buffer/industrialio-buffer-dma.c 				block->size = size;
block             337 drivers/iio/buffer/industrialio-buffer-dma.c 			block = NULL;
block             340 drivers/iio/buffer/industrialio-buffer-dma.c 		if (!block) {
block             341 drivers/iio/buffer/industrialio-buffer-dma.c 			block = iio_dma_buffer_alloc_block(queue, size);
block             342 drivers/iio/buffer/industrialio-buffer-dma.c 			if (!block) {
block             346 drivers/iio/buffer/industrialio-buffer-dma.c 			queue->fileio.blocks[i] = block;
block             349 drivers/iio/buffer/industrialio-buffer-dma.c 		block->state = IIO_BLOCK_STATE_QUEUED;
block             350 drivers/iio/buffer/industrialio-buffer-dma.c 		list_add_tail(&block->head, &queue->incoming);
block             361 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block)
block             373 drivers/iio/buffer/industrialio-buffer-dma.c 	block->state = IIO_BLOCK_STATE_ACTIVE;
block             374 drivers/iio/buffer/industrialio-buffer-dma.c 	iio_buffer_block_get(block);
block             375 drivers/iio/buffer/industrialio-buffer-dma.c 	ret = queue->ops->submit(queue, block);
block             387 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_buffer_block_put(block);
block             405 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block, *_block;
block             409 drivers/iio/buffer/industrialio-buffer-dma.c 	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
block             410 drivers/iio/buffer/industrialio-buffer-dma.c 		list_del(&block->head);
block             411 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_dma_buffer_submit_block(queue, block);
block             444 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block)
block             446 drivers/iio/buffer/industrialio-buffer-dma.c 	if (block->state == IIO_BLOCK_STATE_DEAD) {
block             447 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_buffer_block_put(block);
block             449 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_dma_buffer_submit_block(queue, block);
block             451 drivers/iio/buffer/industrialio-buffer-dma.c 		block->state = IIO_BLOCK_STATE_QUEUED;
block             452 drivers/iio/buffer/industrialio-buffer-dma.c 		list_add_tail(&block->head, &queue->incoming);
block             459 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block;
block             462 drivers/iio/buffer/industrialio-buffer-dma.c 	block = list_first_entry_or_null(&queue->outgoing, struct
block             464 drivers/iio/buffer/industrialio-buffer-dma.c 	if (block != NULL) {
block             465 drivers/iio/buffer/industrialio-buffer-dma.c 		list_del(&block->head);
block             466 drivers/iio/buffer/industrialio-buffer-dma.c 		block->state = IIO_BLOCK_STATE_DEQUEUED;
block             470 drivers/iio/buffer/industrialio-buffer-dma.c 	return block;
block             486 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block;
block             495 drivers/iio/buffer/industrialio-buffer-dma.c 		block = iio_dma_buffer_dequeue(queue);
block             496 drivers/iio/buffer/industrialio-buffer-dma.c 		if (block == NULL) {
block             501 drivers/iio/buffer/industrialio-buffer-dma.c 		queue->fileio.active_block = block;
block             503 drivers/iio/buffer/industrialio-buffer-dma.c 		block = queue->fileio.active_block;
block             507 drivers/iio/buffer/industrialio-buffer-dma.c 	if (n > block->bytes_used - queue->fileio.pos)
block             508 drivers/iio/buffer/industrialio-buffer-dma.c 		n = block->bytes_used - queue->fileio.pos;
block             510 drivers/iio/buffer/industrialio-buffer-dma.c 	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
block             517 drivers/iio/buffer/industrialio-buffer-dma.c 	if (queue->fileio.pos == block->bytes_used) {
block             519 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_dma_buffer_enqueue(queue, block);
block             541 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_block *block;
block             556 drivers/iio/buffer/industrialio-buffer-dma.c 	list_for_each_entry(block, &queue->outgoing, head)
block             557 drivers/iio/buffer/industrialio-buffer-dma.c 		data_available += block->size;
block              48 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	struct iio_dma_buffer_block *block = data;
block              51 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	spin_lock_irqsave(&block->queue->list_lock, flags);
block              52 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	list_del(&block->head);
block              53 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	spin_unlock_irqrestore(&block->queue->list_lock, flags);
block              54 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	iio_dma_buffer_block_done(block);
block              58 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	struct iio_dma_buffer_block *block)
block              65 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
block              66 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	block->bytes_used = rounddown(block->bytes_used,
block              70 drivers/iio/buffer/industrialio-buffer-dmaengine.c 		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
block              76 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	desc->callback_param = block;
block              83 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	list_add_tail(&block->head, &dmaengine_buffer->active);
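
A compressed sketch of the reference-counting pattern running through the industrialio-buffer-dma entries above: each DMA block embeds a kref, the core takes an extra reference before handing the block to the DMA backend, and the completion path drops it, so dma_free_coherent() runs only after both the owner and any in-flight transfer are done with the block. All demo_* names are simplified stand-ins, not the driver's own types.

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/dma-mapping.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    struct demo_dma_block {
            struct kref kref;
            struct device *dev;
            void *vaddr;
            dma_addr_t phys_addr;
            size_t size;
    };

    static void demo_block_release(struct kref *kref)
    {
            struct demo_dma_block *block =
                    container_of(kref, struct demo_dma_block, kref);

            dma_free_coherent(block->dev, PAGE_ALIGN(block->size),
                              block->vaddr, block->phys_addr);
            kfree(block);
    }

    static struct demo_dma_block *demo_block_alloc(struct device *dev, size_t size)
    {
            struct demo_dma_block *block = kzalloc(sizeof(*block), GFP_KERNEL);

            if (!block)
                    return NULL;
            block->vaddr = dma_alloc_coherent(dev, PAGE_ALIGN(size),
                                              &block->phys_addr, GFP_KERNEL);
            if (!block->vaddr) {
                    kfree(block);
                    return NULL;
            }
            block->dev = dev;
            block->size = size;
            kref_init(&block->kref);        /* initial reference held by the owner */
            return block;
    }

    /* Submission takes a reference for the hardware; completion drops it. */
    static void demo_block_submit(struct demo_dma_block *block)
    {
            kref_get(&block->kref);
            /* ... hand block->phys_addr to the DMA engine here ... */
    }

    static void demo_block_complete(struct demo_dma_block *block)
    {
            kref_put(&block->kref, demo_block_release);
    }
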
block             497 drivers/infiniband/hw/bnxt_re/qplib_sp.c 			 bool block)
block             531 drivers/infiniband/hw/bnxt_re/qplib_sp.c 					  NULL, block);
block             540 drivers/infiniband/hw/bnxt_re/qplib_sp.c 			   bool block)
block             553 drivers/infiniband/hw/bnxt_re/qplib_sp.c 				     block);
block             629 drivers/infiniband/hw/bnxt_re/qplib_sp.c 			 bool block)
block             641 drivers/infiniband/hw/bnxt_re/qplib_sp.c 					  (void *)&resp, NULL, block);
block             656 drivers/infiniband/hw/bnxt_re/qplib_sp.c 		      u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
block             729 drivers/infiniband/hw/bnxt_re/qplib_sp.c 					  (void *)&resp, NULL, block);
block             250 drivers/infiniband/hw/bnxt_re/qplib_sp.h 			 bool block);
block             252 drivers/infiniband/hw/bnxt_re/qplib_sp.h 			   bool block);
block             256 drivers/infiniband/hw/bnxt_re/qplib_sp.h 			 bool block);
block             258 drivers/infiniband/hw/bnxt_re/qplib_sp.h 		      u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size);
block              94 drivers/infiniband/hw/mlx4/mad.c 				int block, u32 change_bitmap);
block             334 drivers/infiniband/hw/mlx4/mad.c 				int block, u32 change_bitmap)
block             352 drivers/infiniband/hw/mlx4/mad.c 				    [ix] == i + 32 * block) {
block             647 drivers/input/mouse/cyapa_gen3.c 		u16 block, const u8 *data)
block             659 drivers/input/mouse/cyapa_gen3.c 	put_unaligned_be16(block, &write_block_cmd.block_num);
block             706 drivers/input/mouse/cyapa_gen3.c 		size_t block = start_block + i;
block             710 drivers/input/mouse/cyapa_gen3.c 		error = cyapa_gen3_write_fw_block(cyapa, block, data);
block            2629 drivers/isdn/hardware/mISDN/hfcmulti.c fifo_irq(struct hfc_multi *hc, int block)
block            2636 drivers/isdn/hardware/mISDN/hfcmulti.c 	r_irq_fifo_bl = HFC_inb_nodebug(hc, R_IRQ_FIFO_BL0 + block);
block            2639 drivers/isdn/hardware/mISDN/hfcmulti.c 		ch = (block << 2) + (j >> 1);
block              84 drivers/md/bcache/debug.c 			unsigned int block = ((void *) i - (void *) ondisk) /
block              87 drivers/md/bcache/debug.c 			pr_err("*** on disk block %u:\n", block);
block              88 drivers/md/bcache/debug.c 			bch_dump_bset(&b->keys, i, block);
block             137 drivers/md/dm-bufio.c 	sector_t block;
block             248 drivers/md/dm-bufio.c static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
block             256 drivers/md/dm-bufio.c 		if (b->block == block)
block             259 drivers/md/dm-bufio.c 		n = (b->block < block) ? n->rb_left : n->rb_right;
block             273 drivers/md/dm-bufio.c 		if (found->block == b->block) {
block             279 drivers/md/dm-bufio.c 		new = (found->block < b->block) ?
block             477 drivers/md/dm-bufio.c static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
block             482 drivers/md/dm-bufio.c 	b->block = block;
block             643 drivers/md/dm-bufio.c 		sector = b->block << b->c->sectors_per_block_bits;
block             645 drivers/md/dm-bufio.c 		sector = b->block * (b->c->block_size >> SECTOR_SHIFT);
block             950 drivers/md/dm-bufio.c static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
block             958 drivers/md/dm-bufio.c 	b = __find(c, block);
block             973 drivers/md/dm-bufio.c 	b = __find(c, block);
block             985 drivers/md/dm-bufio.c 	__link_buffer(b, block, LIST_CLEAN);
block            1039 drivers/md/dm-bufio.c static void *new_read(struct dm_bufio_client *c, sector_t block,
block            1048 drivers/md/dm-bufio.c 	b = __bufio_new(c, block, nf, &need_submit, &write_list);
block            1078 drivers/md/dm-bufio.c void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
block            1081 drivers/md/dm-bufio.c 	return new_read(c, block, NF_GET, bp);
block            1085 drivers/md/dm-bufio.c void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
block            1090 drivers/md/dm-bufio.c 	return new_read(c, block, NF_READ, bp);
block            1094 drivers/md/dm-bufio.c void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
block            1099 drivers/md/dm-bufio.c 	return new_read(c, block, NF_FRESH, bp);
block            1104 drivers/md/dm-bufio.c 		       sector_t block, unsigned n_blocks)
block            1115 drivers/md/dm-bufio.c 	for (; n_blocks--; block++) {
block            1118 drivers/md/dm-bufio.c 		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
block            1389 drivers/md/dm-bufio.c 		old_block = b->block;
block            1410 drivers/md/dm-bufio.c void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
block            1416 drivers/md/dm-bufio.c 	b = __find(c, block);
block            1451 drivers/md/dm-bufio.c 	return b->block;
block            1496 drivers/md/dm-bufio.c 			      (unsigned long long)b->block, b->hold_count, i);
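
A hedged usage sketch for the dm-bufio calls listed above: dm_bufio_read() returns the buffer's data for a given block number and pins the buffer through *bp until dm_bufio_release(). The client is assumed to have been created elsewhere, the <linux/dm-bufio.h> header path is an assumption for this kernel era, and demo_read_block() is illustrative.

    #include <linux/dm-bufio.h>
    #include <linux/err.h>
    #include <linux/string.h>

    /* Read one block through the bufio cache and copy out its first @len bytes. */
    static int demo_read_block(struct dm_bufio_client *c, sector_t block,
                               void *out, size_t len)
    {
            struct dm_buffer *bp;
            void *data;

            data = dm_bufio_read(c, block, &bp);
            if (IS_ERR(data))
                    return PTR_ERR(data);

            memcpy(out, data, len);         /* @len must not exceed the block size */

            dm_bufio_release(bp);           /* unpin; the buffer may now be evicted */
            return 0;
    }
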
block             724 drivers/md/dm-cache-metadata.c static __le64 pack_value(dm_oblock_t block, unsigned flags)
block             726 drivers/md/dm-cache-metadata.c 	uint64_t value = from_oblock(block);
block             732 drivers/md/dm-cache-metadata.c static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
block             736 drivers/md/dm-cache-metadata.c 	*block = to_oblock(b);
block              85 drivers/md/dm-cache-policy-smq.c static struct entry *__get_entry(struct entry_space *es, unsigned block)
block              89 drivers/md/dm-cache-policy-smq.c 	e = es->begin + block;
block             101 drivers/md/dm-cache-policy-smq.c static struct entry *to_entry(struct entry_space *es, unsigned block)
block             103 drivers/md/dm-cache-policy-smq.c 	if (block == INDEXER_NULL)
block             106 drivers/md/dm-cache-policy-smq.c 	return __get_entry(es, block);
block             815 drivers/md/dm-cache-target.c 	sector_t block = from_cblock(cblock);
block             820 drivers/md/dm-cache-target.c 			(block * cache->sectors_per_block) +
block             824 drivers/md/dm-cache-target.c 			(block << cache->sectors_per_block_shift) |
block            1157 drivers/md/dm-cache-target.c static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
block            1160 drivers/md/dm-cache-target.c 		(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
block            1695 drivers/md/dm-cache-target.c static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
block            1704 drivers/md/dm-cache-target.c 	rb = bio_detain_shared(cache, block, bio);
block            1718 drivers/md/dm-cache-target.c 	if (optimisable_bio(cache, bio, block)) {
block            1721 drivers/md/dm-cache-target.c 		r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op);
block            1736 drivers/md/dm-cache-target.c 		r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued);
block            1757 drivers/md/dm-cache-target.c 			remap_to_origin_clear_discard(cache, bio, block);
block            1780 drivers/md/dm-cache-target.c 				invalidate_start(cache, cblock, block, bio);
block            1782 drivers/md/dm-cache-target.c 				remap_to_origin_clear_discard(cache, bio, block);
block            1786 drivers/md/dm-cache-target.c 				remap_to_origin_and_cache(cache, bio, block, cblock);
block            1789 drivers/md/dm-cache-target.c 				remap_to_cache_dirty(cache, bio, block, cblock);
block            2727 drivers/md/dm-cache-target.c 	dm_oblock_t block = get_bio_block(cache, bio);
block            2730 drivers/md/dm-cache-target.c 	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
block            2746 drivers/md/dm-cache-target.c 	r = map_bio(cache, bio, block, &commit_needed);
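
The remap entries above (dm-cache-target.c here, and the same pattern in dm-thin.c further down) choose between a shift and a multiply depending on whether the block size is a power of two. A standalone sketch of that arithmetic with illustrative names; the negative-shift convention mirrors what the cache/pool structures use.

    #include <linux/types.h>

    struct demo_geometry {
            sector_t sectors_per_block;
            int sectors_per_block_shift;    /* negative when not a power of two */
    };

    /* Map (block, offset within block) to an absolute data-device sector. */
    static sector_t demo_block_to_sector(const struct demo_geometry *g,
                                         sector_t block, sector_t offset)
    {
            if (g->sectors_per_block_shift >= 0)
                    return (block << g->sectors_per_block_shift) | offset;

            return block * g->sectors_per_block + offset;
    }
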
block              77 drivers/md/dm-dust.c static int dust_remove_block(struct dust_device *dd, unsigned long long block)
block              83 drivers/md/dm-dust.c 	bblock = dust_rb_search(&dd->badblocklist, block);
block              88 drivers/md/dm-dust.c 			      __func__, block);
block              97 drivers/md/dm-dust.c 		DMINFO("%s: badblock removed at block %llu", __func__, block);
block             104 drivers/md/dm-dust.c static int dust_add_block(struct dust_device *dd, unsigned long long block)
block             117 drivers/md/dm-dust.c 	bblock->bb = block;
block             121 drivers/md/dm-dust.c 			      __func__, block);
block             130 drivers/md/dm-dust.c 		DMINFO("%s: badblock added at block %llu", __func__, block);
block             136 drivers/md/dm-dust.c static int dust_query_block(struct dust_device *dd, unsigned long long block)
block             142 drivers/md/dm-dust.c 	bblock = dust_rb_search(&dd->badblocklist, block);
block             144 drivers/md/dm-dust.c 		DMINFO("%s: block %llu found in badblocklist", __func__, block);
block             146 drivers/md/dm-dust.c 		DMINFO("%s: block %llu not found in badblocklist", __func__, block);
block             379 drivers/md/dm-dust.c 	unsigned long long tmp, block;
block             417 drivers/md/dm-dust.c 		block = tmp;
block             419 drivers/md/dm-dust.c 		if (block > size) {
block             425 drivers/md/dm-dust.c 			result = dust_add_block(dd, block);
block             427 drivers/md/dm-dust.c 			result = dust_remove_block(dd, block);
block             429 drivers/md/dm-dust.c 			result = dust_query_block(dd, block);
block             103 drivers/md/dm-era-target.c static bool writeset_marked(struct writeset *ws, dm_block_t block)
block             105 drivers/md/dm-era-target.c 	return test_bit(block, ws->bits);
block             109 drivers/md/dm-era-target.c 				   struct writeset_metadata *m, dm_block_t block,
block             118 drivers/md/dm-era-target.c 	int r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
block             133 drivers/md/dm-era-target.c 				 struct writeset *ws, uint32_t block)
block             137 drivers/md/dm-era-target.c 	if (!test_and_set_bit(block, ws->bits)) {
block             138 drivers/md/dm-era-target.c 		r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
block             936 drivers/md/dm-era-target.c static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
block             943 drivers/md/dm-era-target.c 	r = writeset_marked(ws, block);
block            1531 drivers/md/dm-era-target.c 	dm_block_t block = get_block(era, bio);
block            1545 drivers/md/dm-era-target.c 	    !metadata_current_marked(era->md, block)) {
block             899 drivers/md/dm-kcopyd.c int kcopyd_cancel(struct kcopyd_job *job, int block)
block             134 drivers/md/dm-log-writes.c 	struct pending_block *block;
block             198 drivers/md/dm-log-writes.c 			       struct pending_block *block)
block             202 drivers/md/dm-log-writes.c 	for (i = 0; i < block->vec_cnt; i++) {
block             203 drivers/md/dm-log-writes.c 		if (block->vecs[i].bv_page)
block             204 drivers/md/dm-log-writes.c 			__free_page(block->vecs[i].bv_page);
block             206 drivers/md/dm-log-writes.c 	kfree(block->data);
block             207 drivers/md/dm-log-writes.c 	kfree(block);
block             332 drivers/md/dm-log-writes.c 			 struct pending_block *block, sector_t sector)
block             339 drivers/md/dm-log-writes.c 	entry.sector = cpu_to_le64(block->sector);
block             340 drivers/md/dm-log-writes.c 	entry.nr_sectors = cpu_to_le64(block->nr_sectors);
block             341 drivers/md/dm-log-writes.c 	entry.flags = cpu_to_le64(block->flags);
block             342 drivers/md/dm-log-writes.c 	entry.data_len = cpu_to_le64(block->datalen);
block             344 drivers/md/dm-log-writes.c 	metadatalen = (block->flags & LOG_MARK_FLAG) ? block->datalen : 0;
block             345 drivers/md/dm-log-writes.c 	if (write_metadata(lc, &entry, sizeof(entry), block->data,
block             347 drivers/md/dm-log-writes.c 		free_pending_block(lc, block);
block             353 drivers/md/dm-log-writes.c 	if (block->datalen && metadatalen == 0) {
block             354 drivers/md/dm-log-writes.c 		if (write_inline_data(lc, &entry, sizeof(entry), block->data,
block             355 drivers/md/dm-log-writes.c 				      block->datalen, sector)) {
block             356 drivers/md/dm-log-writes.c 			free_pending_block(lc, block);
block             363 drivers/md/dm-log-writes.c 	if (!block->vec_cnt)
block             367 drivers/md/dm-log-writes.c 	bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
block             379 drivers/md/dm-log-writes.c 	for (i = 0; i < block->vec_cnt; i++) {
block             384 drivers/md/dm-log-writes.c 		ret = bio_add_page(bio, block->vecs[i].bv_page,
block             385 drivers/md/dm-log-writes.c 				   block->vecs[i].bv_len, 0);
block             386 drivers/md/dm-log-writes.c 		if (ret != block->vecs[i].bv_len) {
block             389 drivers/md/dm-log-writes.c 			bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
block             401 drivers/md/dm-log-writes.c 			ret = bio_add_page(bio, block->vecs[i].bv_page,
block             402 drivers/md/dm-log-writes.c 					   block->vecs[i].bv_len, 0);
block             403 drivers/md/dm-log-writes.c 			if (ret != block->vecs[i].bv_len) {
block             409 drivers/md/dm-log-writes.c 		sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
block             413 drivers/md/dm-log-writes.c 	kfree(block->data);
block             414 drivers/md/dm-log-writes.c 	kfree(block);
block             418 drivers/md/dm-log-writes.c 	free_pending_block(lc, block);
block             460 drivers/md/dm-log-writes.c 		struct pending_block *block = NULL;
block             465 drivers/md/dm-log-writes.c 			block = list_first_entry(&lc->logging_blocks,
block             467 drivers/md/dm-log-writes.c 			list_del_init(&block->list);
block             472 drivers/md/dm-log-writes.c 			if (!(block->flags & LOG_DISCARD_FLAG))
block             473 drivers/md/dm-log-writes.c 				lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);
block             491 drivers/md/dm-log-writes.c 			super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
block             498 drivers/md/dm-log-writes.c 		if (block) {
block             500 drivers/md/dm-log-writes.c 				ret = log_one_block(lc, block, sector);
block             509 drivers/md/dm-log-writes.c 				free_pending_block(lc, block);
block             608 drivers/md/dm-log-writes.c 	struct pending_block *block;
block             611 drivers/md/dm-log-writes.c 	block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
block             612 drivers/md/dm-log-writes.c 	if (!block) {
block             617 drivers/md/dm-log-writes.c 	block->data = kstrndup(data, maxsize - 1, GFP_KERNEL);
block             618 drivers/md/dm-log-writes.c 	if (!block->data) {
block             620 drivers/md/dm-log-writes.c 		kfree(block);
block             624 drivers/md/dm-log-writes.c 	block->datalen = strlen(block->data);
block             625 drivers/md/dm-log-writes.c 	block->flags |= LOG_MARK_FLAG;
block             627 drivers/md/dm-log-writes.c 	list_add_tail(&block->list, &lc->logging_blocks);
block             669 drivers/md/dm-log-writes.c 	struct pending_block *block;
block             679 drivers/md/dm-log-writes.c 	pb->block = NULL;
block             702 drivers/md/dm-log-writes.c 		alloc_size = struct_size(block, vecs, bio_segments(bio));
block             704 drivers/md/dm-log-writes.c 	block = kzalloc(alloc_size, GFP_NOIO);
block             705 drivers/md/dm-log-writes.c 	if (!block) {
block             712 drivers/md/dm-log-writes.c 	INIT_LIST_HEAD(&block->list);
block             713 drivers/md/dm-log-writes.c 	pb->block = block;
block             717 drivers/md/dm-log-writes.c 		block->flags |= LOG_FLUSH_FLAG;
block             719 drivers/md/dm-log-writes.c 		block->flags |= LOG_FUA_FLAG;
block             721 drivers/md/dm-log-writes.c 		block->flags |= LOG_DISCARD_FLAG;
block             723 drivers/md/dm-log-writes.c 		block->flags |= LOG_METADATA_FLAG;
block             725 drivers/md/dm-log-writes.c 	block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
block             726 drivers/md/dm-log-writes.c 	block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
block             740 drivers/md/dm-log-writes.c 		list_splice_init(&lc->unflushed_blocks, &block->list);
block             761 drivers/md/dm-log-writes.c 			free_pending_block(lc, block);
block             773 drivers/md/dm-log-writes.c 		block->vecs[i].bv_page = page;
block             774 drivers/md/dm-log-writes.c 		block->vecs[i].bv_len = bv.bv_len;
block             775 drivers/md/dm-log-writes.c 		block->vec_cnt++;
block             782 drivers/md/dm-log-writes.c 		list_splice_init(&lc->unflushed_blocks, &block->list);
block             796 drivers/md/dm-log-writes.c 	if (bio_data_dir(bio) == WRITE && pb->block) {
block             797 drivers/md/dm-log-writes.c 		struct pending_block *block = pb->block;
block             801 drivers/md/dm-log-writes.c 		if (block->flags & LOG_FLUSH_FLAG) {
block             802 drivers/md/dm-log-writes.c 			list_splice_tail_init(&block->list, &lc->logging_blocks);
block             803 drivers/md/dm-log-writes.c 			list_add_tail(&block->list, &lc->logging_blocks);
block             805 drivers/md/dm-log-writes.c 		} else if (block->flags & LOG_FUA_FLAG) {
block             806 drivers/md/dm-log-writes.c 			list_add_tail(&block->list, &lc->logging_blocks);
block             809 drivers/md/dm-log-writes.c 			list_add_tail(&block->list, &lc->unflushed_blocks);
block             906 drivers/md/dm-log-writes.c 	struct pending_block *block;
block             911 drivers/md/dm-log-writes.c 	block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
block             912 drivers/md/dm-log-writes.c 	if (!block) {
block             917 drivers/md/dm-log-writes.c 	block->data = kzalloc(bytes, GFP_KERNEL);
block             918 drivers/md/dm-log-writes.c 	if (!block->data) {
block             920 drivers/md/dm-log-writes.c 		kfree(block);
block             925 drivers/md/dm-log-writes.c 	if (!copy_from_iter(block->data, bytes, i)) {
block             927 drivers/md/dm-log-writes.c 		kfree(block->data);
block             928 drivers/md/dm-log-writes.c 		kfree(block);
block             935 drivers/md/dm-log-writes.c 	block->datalen = bytes;
block             936 drivers/md/dm-log-writes.c 	block->sector = bio_to_dev_sectors(lc, sector);
block             937 drivers/md/dm-log-writes.c 	block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
block             941 drivers/md/dm-log-writes.c 	list_add_tail(&block->list, &lc->unflushed_blocks);
block             668 drivers/md/dm-log.c static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
block            1484 drivers/md/dm-thin-metadata.c 	result->block = exception_block;
block            1488 drivers/md/dm-thin-metadata.c static int __find_block(struct dm_thin_device *td, dm_block_t block,
block            1494 drivers/md/dm-thin-metadata.c 	dm_block_t keys[2] = { td->id, block };
block            1509 drivers/md/dm-thin-metadata.c int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
block            1521 drivers/md/dm-thin-metadata.c 	r = __find_block(td, block, can_issue_io, result);
block            1527 drivers/md/dm-thin-metadata.c static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
block            1534 drivers/md/dm-thin-metadata.c 	dm_block_t keys[2] = { td->id, block };
block            1563 drivers/md/dm-thin-metadata.c 	*pool_begin = lookup.block;
block            1577 drivers/md/dm-thin-metadata.c 		if ((lookup.block != pool_end) ||
block            1607 drivers/md/dm-thin-metadata.c static int __insert(struct dm_thin_device *td, dm_block_t block,
block            1613 drivers/md/dm-thin-metadata.c 	dm_block_t keys[2] = { td->id, block };
block            1630 drivers/md/dm-thin-metadata.c int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
block            1637 drivers/md/dm-thin-metadata.c 		r = __insert(td, block, data_block);
block            1643 drivers/md/dm-thin-metadata.c static int __remove(struct dm_thin_device *td, dm_block_t block)
block            1647 drivers/md/dm-thin-metadata.c 	dm_block_t keys[2] = { td->id, block };
block            1718 drivers/md/dm-thin-metadata.c int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
block            1724 drivers/md/dm-thin-metadata.c 		r = __remove(td, block);
block             136 drivers/md/dm-thin-metadata.h 	dm_block_t block;
block             146 drivers/md/dm-thin-metadata.h int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
block             166 drivers/md/dm-thin-metadata.h int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
block             169 drivers/md/dm-thin-metadata.h int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
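
Every dm-thin-metadata entry above builds the same two-level btree key, { thin device id, virtual block }, before calling into the persistent-data btree. A hedged sketch of a lookup with that key shape; demo_find_mapping() is illustrative and assumes the btree info and root have been set up the way dm-thin-metadata does.

    #include <linux/types.h>
    #include "persistent-data/dm-btree.h"

    /* Look up the packed mapping for one virtual block of one thin device. */
    static int demo_find_mapping(struct dm_btree_info *info, dm_block_t root,
                                 uint64_t dev_id, dm_block_t block,
                                 __le64 *value_le)
    {
            /* Level 0 indexes the device, level 1 the block within it. */
            uint64_t keys[2] = { dev_id, block };

            return dm_btree_lookup(info, root, keys, value_le);
    }
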
block             721 drivers/md/dm-thin.c static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
block             729 drivers/md/dm-thin.c 			(block << pool->sectors_per_block_shift) |
block             732 drivers/md/dm-thin.c 		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
block             794 drivers/md/dm-thin.c 			    dm_block_t block)
block             796 drivers/md/dm-thin.c 	remap(tc, bio, block);
block             931 drivers/md/dm-thin.c 				     dm_block_t block)
block             952 drivers/md/dm-thin.c 		remap_and_issue(info.tc, bio, block);
block            1792 drivers/md/dm-thin.c static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
block            1804 drivers/md/dm-thin.c 		schedule_internal_copy(tc, block, lookup_result->block,
block            1842 drivers/md/dm-thin.c 					dm_block_t block)
block            1858 drivers/md/dm-thin.c 		remap_and_issue(tc, bio, block);
block            1862 drivers/md/dm-thin.c 			       dm_block_t block,
block            1874 drivers/md/dm-thin.c 	build_data_key(tc->td, lookup_result->block, &key);
block            1881 drivers/md/dm-thin.c 		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
block            1888 drivers/md/dm-thin.c 		remap_and_issue(tc, bio, lookup_result->block);
block            1890 drivers/md/dm-thin.c 		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
block            1891 drivers/md/dm-thin.c 		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
block            1895 drivers/md/dm-thin.c static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
block            1927 drivers/md/dm-thin.c 			schedule_external_copy(tc, block, data_block, cell, bio);
block            1929 drivers/md/dm-thin.c 			schedule_zero(tc, block, data_block, cell, bio);
block            1949 drivers/md/dm-thin.c 	dm_block_t block = get_bio_block(tc, bio);
block            1957 drivers/md/dm-thin.c 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
block            1961 drivers/md/dm-thin.c 			process_shared_bio(tc, bio, block, &lookup_result, cell);
block            1964 drivers/md/dm-thin.c 			remap_and_issue(tc, bio, lookup_result.block);
block            1965 drivers/md/dm-thin.c 			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
block            1987 drivers/md/dm-thin.c 			provision_block(tc, bio, block, cell);
block            2002 drivers/md/dm-thin.c 	dm_block_t block = get_bio_block(tc, bio);
block            2010 drivers/md/dm-thin.c 	build_virtual_key(tc->td, block, &key);
block            2022 drivers/md/dm-thin.c 	dm_block_t block = get_bio_block(tc, bio);
block            2025 drivers/md/dm-thin.c 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
block            2034 drivers/md/dm-thin.c 			remap_and_issue(tc, bio, lookup_result.block);
block            2036 drivers/md/dm-thin.c 				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
block            2721 drivers/md/dm-thin.c 	dm_block_t block = get_bio_block(tc, bio);
block            2749 drivers/md/dm-thin.c 	build_virtual_key(tc->td, block, &key);
block            2753 drivers/md/dm-thin.c 	r = dm_thin_find_block(td, block, 0, &result);
block            2779 drivers/md/dm-thin.c 		build_data_key(tc->td, result.block, &key);
block            2789 drivers/md/dm-thin.c 		remap(tc, bio, result.block);
block              64 drivers/md/dm-verity-fec.c 	u64 position, block;
block              68 drivers/md/dm-verity-fec.c 	block = position >> v->data_dev_block_bits;
block              69 drivers/md/dm-verity-fec.c 	*offset = (unsigned)(position - (block << v->data_dev_block_bits));
block              71 drivers/md/dm-verity-fec.c 	res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
block              75 drivers/md/dm-verity-fec.c 		      (unsigned long long)(v->fec->start + block),
block             131 drivers/md/dm-verity-fec.c 	u8 *par, *block;
block             142 drivers/md/dm-verity-fec.c 		block = fec_buffer_rs_block(v, fio, n, i);
block             143 drivers/md/dm-verity-fec.c 		res = fec_decode_rs8(v, fio, block, &par[offset], neras);
block             150 drivers/md/dm-verity-fec.c 		fio->output[block_offset] = block[byte_index];
block             209 drivers/md/dm-verity-fec.c 	u64 block, ileaved;
block             234 drivers/md/dm-verity-fec.c 		block = ileaved >> v->data_dev_block_bits;
block             237 drivers/md/dm-verity-fec.c 		if (block >= v->data_blocks) {
block             238 drivers/md/dm-verity-fec.c 			block -= v->data_blocks;
block             244 drivers/md/dm-verity-fec.c 			if (unlikely(block >= v->fec->hash_blocks))
block             247 drivers/md/dm-verity-fec.c 			block += v->hash_start;
block             251 drivers/md/dm-verity-fec.c 		bbuf = dm_bufio_read(bufio, block, &buf);
block             256 drivers/md/dm-verity-fec.c 				     (unsigned long long)block, PTR_ERR(bbuf));
block             267 drivers/md/dm-verity-fec.c 		    verity_hash_for_block(v, io, block, want_digest,
block             420 drivers/md/dm-verity-fec.c 		      enum verity_block_type type, sector_t block, u8 *dest,
block             438 drivers/md/dm-verity-fec.c 		block = block - v->hash_start + v->data_blocks;
block             450 drivers/md/dm-verity-fec.c 	offset = block << v->data_dev_block_bits;
block              71 drivers/md/dm-verity-fec.h 			     enum verity_block_type type, sector_t block,
block             102 drivers/md/dm-verity-fec.h 				    sector_t block, u8 *dest,
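
The dm-verity-fec entries above split a byte position into a data-device block number and an offset inside that block with a single shift. The same arithmetic as a tiny standalone helper (illustrative name):

    #include <linux/types.h>

    /* Split @position into (block, offset) for a block size of 1 << block_bits. */
    static u64 demo_position_to_block(u64 position, unsigned int block_bits,
                                      unsigned int *offset)
    {
            u64 block = position >> block_bits;

            *offset = (unsigned int)(position - (block << block_bits));
            return block;
    }
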
block              46 drivers/md/dm-verity-target.c 	sector_t block;
block              90 drivers/md/dm-verity-target.c static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
block              93 drivers/md/dm-verity-target.c 	return block >> (level * v->hash_per_block_bits);
block             191 drivers/md/dm-verity-target.c static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
block             194 drivers/md/dm-verity-target.c 	sector_t position = verity_position_at_level(v, block, level);
block             213 drivers/md/dm-verity-target.c 			     unsigned long long block)
block             240 drivers/md/dm-verity-target.c 		    type_str, block);
block             246 drivers/md/dm-verity-target.c 		DM_VERITY_ENV_VAR_NAME, type, block);
block             272 drivers/md/dm-verity-target.c 			       sector_t block, int level, bool skip_unverified,
block             282 drivers/md/dm-verity-target.c 	verity_hash_at_level(v, block, level, &hash_block, &offset);
block             331 drivers/md/dm-verity-target.c 			  sector_t block, u8 *digest, bool *is_zero)
block             343 drivers/md/dm-verity-target.c 		r = verity_verify_level(v, io, block, 0, true, digest);
block             351 drivers/md/dm-verity-target.c 		r = verity_verify_level(v, io, block, i, false, digest);
block             477 drivers/md/dm-verity-target.c 		sector_t cur_block = io->block + b;
block             587 drivers/md/dm-verity-target.c 		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
block             588 drivers/md/dm-verity-target.c 		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
block             624 drivers/md/dm-verity-target.c 	pw->block = io->block;
block             659 drivers/md/dm-verity-target.c 	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
block              76 drivers/md/dm-verity.h 	sector_t block;
block             129 drivers/md/dm-verity.h 				 sector_t block, u8 *digest, bool *is_zero);
block             538 drivers/md/dm-writecache.c 					      uint64_t block, int flags)
block             548 drivers/md/dm-writecache.c 		if (read_original_sector(wc, e) == block)
block             551 drivers/md/dm-writecache.c 		node = (read_original_sector(wc, e) >= block ?
block             556 drivers/md/dm-writecache.c 			if (read_original_sector(wc, e) >= block) {
block             577 drivers/md/dm-writecache.c 		if (read_original_sector(wc, e2) != block)
block             124 drivers/md/dm-zoned-metadata.c 	sector_t		block;
block             404 drivers/md/dm-zoned-metadata.c 	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
block             442 drivers/md/dm-zoned-metadata.c 	bio->bi_iter.bi_sector = dmz_blk2sect(block);
block             582 drivers/md/dm-zoned-metadata.c 	sector_t block = zmd->sb[set].block + mblk->no;
block             596 drivers/md/dm-zoned-metadata.c 	bio->bi_iter.bi_sector = dmz_blk2sect(block);
block             610 drivers/md/dm-zoned-metadata.c static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
block             623 drivers/md/dm-zoned-metadata.c 	bio->bi_iter.bi_sector = dmz_blk2sect(block);
block             640 drivers/md/dm-zoned-metadata.c 	sector_t block = zmd->sb[set].block;
block             651 drivers/md/dm-zoned-metadata.c 	sb->sb_block = cpu_to_le64(block);
block             662 drivers/md/dm-zoned-metadata.c 	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
block             900 drivers/md/dm-zoned-metadata.c 	return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
block             924 drivers/md/dm-zoned-metadata.c 	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
block             930 drivers/md/dm-zoned-metadata.c 		zmd->sb[1].block += zone_nr_blocks;
block             978 drivers/md/dm-zoned-metadata.c 		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
block             980 drivers/md/dm-zoned-metadata.c 		zmd->sb[1].block = zmd->sb[0].block +
block             991 drivers/md/dm-zoned-metadata.c 				     zmd->sb[src_set].block + i, page);
block             995 drivers/md/dm-zoned-metadata.c 				     zmd->sb[dst_set].block + i, page);
block            1027 drivers/md/dm-zoned-metadata.c 	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
block            1039 drivers/md/dm-zoned-metadata.c 		zmd->sb[1].block = zmd->sb[0].block +
block              59 drivers/md/dm-zoned-reclaim.c 				sector_t block)
block              66 drivers/md/dm-zoned-reclaim.c 	if (wp_block == block)
block              69 drivers/md/dm-zoned-reclaim.c 	if (wp_block > block)
block              76 drivers/md/dm-zoned-reclaim.c 	nr_blocks = block - wp_block;
block              84 drivers/md/dm-zoned-reclaim.c 			    (unsigned long long)block, nr_blocks, ret);
block              89 drivers/md/dm-zoned-reclaim.c 	zone->wp_block = block;
block             121 drivers/md/dm-zoned-reclaim.c 	sector_t block = 0, end_block;
block             138 drivers/md/dm-zoned-reclaim.c 	while (block < end_block) {
block             143 drivers/md/dm-zoned-reclaim.c 		ret = dmz_first_valid_block(zmd, src_zone, &block);
block             154 drivers/md/dm-zoned-reclaim.c 			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
block             160 drivers/md/dm-zoned-reclaim.c 		src.sector = dmz_blk2sect(src_zone_block + block);
block             164 drivers/md/dm-zoned-reclaim.c 		dst.sector = dmz_blk2sect(dst_zone_block + block);
block             178 drivers/md/dm-zoned-reclaim.c 		block += nr_blocks;
block             180 drivers/md/dm-zoned-reclaim.c 			dst_zone->wp_block = block;
block             346 drivers/md/dm-zoned-target.c 	sector_t block = dmz_bio_block(bio);
block             348 drivers/md/dm-zoned-target.c 	sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
block             367 drivers/md/md-bitmap.c 	sector_t block;
block             378 drivers/md/md-bitmap.c 	block = index << (PAGE_SHIFT - inode->i_blkbits);
block             383 drivers/md/md-bitmap.c 			bh->b_blocknr = bmap(inode, block);
block             402 drivers/md/md-bitmap.c 		block++;
block             927 drivers/md/md-bitmap.c static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
block             932 drivers/md/md-bitmap.c 	unsigned long chunk = block >> bitmap->counts.chunkshift;
block             956 drivers/md/md-bitmap.c static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
block             961 drivers/md/md-bitmap.c 	unsigned long chunk = block >> bitmap->counts.chunkshift;
block             984 drivers/md/md-bitmap.c static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
block             989 drivers/md/md-bitmap.c 	unsigned long chunk = block >> bitmap->counts.chunkshift;
block            1295 drivers/md/md-bitmap.c 		sector_t  block = (sector_t)j << counts->chunkshift;
block            1306 drivers/md/md-bitmap.c 		bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
block            1314 drivers/md/md-bitmap.c 			md_bitmap_count_page(counts, block, -1);
block            1315 drivers/md/md-bitmap.c 			md_bitmap_file_clear_bit(bitmap, block);
block            1318 drivers/md/md-bitmap.c 			md_bitmap_set_pending(counts, block);
block            1985 drivers/md/md-bitmap.c 	sector_t block, lo = 0, hi = 0;
block            1997 drivers/md/md-bitmap.c 		block = (sector_t)j << counts->chunkshift;
block            1998 drivers/md/md-bitmap.c 		if (md_bitmap_file_test_bit(bitmap, block)) {
block            2000 drivers/md/md-bitmap.c 				lo = block;
block            2001 drivers/md/md-bitmap.c 			hi = block;
block            2002 drivers/md/md-bitmap.c 			md_bitmap_file_clear_bit(bitmap, block);
block            2003 drivers/md/md-bitmap.c 			md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
block            2004 drivers/md/md-bitmap.c 			md_bitmap_file_set_bit(mddev->bitmap, block);
block            2069 drivers/md/md-bitmap.c 	sector_t block;
block            2188 drivers/md/md-bitmap.c 	for (block = 0; block < blocks; ) {
block            2192 drivers/md/md-bitmap.c 		bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
block            2196 drivers/md/md-bitmap.c 			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
block            2199 drivers/md/md-bitmap.c 				sector_t end = block + new_blocks;
block            2200 drivers/md/md-bitmap.c 				sector_t start = block >> chunkshift;
block            2203 drivers/md/md-bitmap.c 					md_bitmap_file_set_bit(bitmap, block);
block            2207 drivers/md/md-bitmap.c 				md_bitmap_count_page(&bitmap->counts, block, 1);
block            2208 drivers/md/md-bitmap.c 				md_bitmap_set_pending(&bitmap->counts, block);
block            2214 drivers/md/md-bitmap.c 		block += old_blocks;
block            2227 drivers/md/md-bitmap.c 		while (block < (chunks << chunkshift)) {
block            2229 drivers/md/md-bitmap.c 			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
block            2236 drivers/md/md-bitmap.c 					md_bitmap_count_page(&bitmap->counts, block, 1);
block            2237 drivers/md/md-bitmap.c 					md_bitmap_set_pending(&bitmap->counts, block);
block            2240 drivers/md/md-bitmap.c 			block += new_blocks;
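
The md-bitmap entries above convert between sectors and bitmap chunks with one shift in each direction: chunk = block >> chunkshift, and block = chunk << chunkshift for the inverse. A small in-memory sketch of that mapping (the kernel sets the bit in a file-backed page rather than a plain bitmap; demo_* names are illustrative):

    #include <linux/bitops.h>
    #include <linux/types.h>

    struct demo_bitmap_counts {
            unsigned long chunkshift;       /* log2(sectors per chunk) */
            unsigned long *bits;            /* one bit per chunk */
    };

    /* Mark the chunk containing sector @block as dirty. */
    static void demo_bitmap_set_bit(struct demo_bitmap_counts *counts,
                                    sector_t block)
    {
            unsigned long chunk = block >> counts->chunkshift;

            set_bit(chunk, counts->bits);
    }

    /* First sector covered by chunk @j: the inverse mapping. */
    static sector_t demo_chunk_to_block(const struct demo_bitmap_counts *counts,
                                        unsigned long j)
    {
            return (sector_t)j << counts->chunkshift;
    }
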
block             154 drivers/md/persistent-data/dm-array.c 			struct dm_block **block, struct array_block **ab)
block             158 drivers/md/persistent-data/dm-array.c 	r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
block             162 drivers/md/persistent-data/dm-array.c 	(*ab) = dm_block_data(*block);
block             221 drivers/md/persistent-data/dm-array.c 		      struct dm_block **block, struct array_block **ab)
block             225 drivers/md/persistent-data/dm-array.c 	r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
block             229 drivers/md/persistent-data/dm-array.c 	*ab = dm_block_data(*block);
block             236 drivers/md/persistent-data/dm-array.c static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
block             238 drivers/md/persistent-data/dm-array.c 	dm_tm_unlock(info->btree_info.tm, block);
block             254 drivers/md/persistent-data/dm-array.c 			 unsigned index, struct dm_block **block,
block             265 drivers/md/persistent-data/dm-array.c 	return get_ablock(info, le64_to_cpu(block_le), block, ab);
block             272 drivers/md/persistent-data/dm-array.c 			 struct dm_block *block, dm_block_t *root)
block             274 drivers/md/persistent-data/dm-array.c 	__le64 block_le = cpu_to_le64(dm_block_location(block));
block             283 drivers/md/persistent-data/dm-array.c 			   struct dm_block **block, struct array_block **ab)
block             287 drivers/md/persistent-data/dm-array.c 				   &array_validator, block, &inc);
block             291 drivers/md/persistent-data/dm-array.c 	*ab = dm_block_data(*block);
block             303 drivers/md/persistent-data/dm-array.c 			     struct dm_block *block, dm_block_t b,
block             308 drivers/md/persistent-data/dm-array.c 	if (dm_block_location(block) != b) {
block             316 drivers/md/persistent-data/dm-array.c 		r = insert_ablock(info, index, block, root);
block             328 drivers/md/persistent-data/dm-array.c 			 unsigned index, struct dm_block **block,
block             341 drivers/md/persistent-data/dm-array.c 	r = __shadow_ablock(info, b, block, ab);
block             345 drivers/md/persistent-data/dm-array.c 	return __reinsert_ablock(info, index, *block, b, root);
block             357 drivers/md/persistent-data/dm-array.c 	struct dm_block *block;
block             360 drivers/md/persistent-data/dm-array.c 	r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
block             365 drivers/md/persistent-data/dm-array.c 	r = insert_ablock(info, block_index, block, root);
block             366 drivers/md/persistent-data/dm-array.c 	unlock_ablock(info, block);
block             469 drivers/md/persistent-data/dm-array.c 	struct dm_block *block;
block             491 drivers/md/persistent-data/dm-array.c 				  resize->new_nr_full_blocks, &block, &ab);
block             496 drivers/md/persistent-data/dm-array.c 		unlock_ablock(resize->info, block);
block             508 drivers/md/persistent-data/dm-array.c 	struct dm_block *block;
block             512 drivers/md/persistent-data/dm-array.c 			  resize->old_nr_full_blocks, &block, &ab);
block             517 drivers/md/persistent-data/dm-array.c 	unlock_ablock(resize->info, block);
block             591 drivers/md/persistent-data/dm-array.c 	struct dm_block *block;
block             610 drivers/md/persistent-data/dm-array.c 		r = get_ablock(info, b, &block, &ab);
block             618 drivers/md/persistent-data/dm-array.c 		unlock_ablock(info, block);
block             725 drivers/md/persistent-data/dm-array.c 	struct dm_block *block;
block             738 drivers/md/persistent-data/dm-array.c 		r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
block             746 drivers/md/persistent-data/dm-array.c 			unlock_ablock(info, block);
block             750 drivers/md/persistent-data/dm-array.c 		r = insert_ablock(info, block_index, block, root);
block             751 drivers/md/persistent-data/dm-array.c 		unlock_ablock(info, block);
block             772 drivers/md/persistent-data/dm-array.c 	struct dm_block *block;
block             780 drivers/md/persistent-data/dm-array.c 	r = lookup_ablock(info, root, index / max_entries, &block, &ab);
block             791 drivers/md/persistent-data/dm-array.c 	unlock_ablock(info, block);
block             800 drivers/md/persistent-data/dm-array.c 	struct dm_block *block;
block             811 drivers/md/persistent-data/dm-array.c 	r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
block             833 drivers/md/persistent-data/dm-array.c 	unlock_ablock(info, block);
block             863 drivers/md/persistent-data/dm-array.c 	struct dm_block *block;
block             867 drivers/md/persistent-data/dm-array.c 	r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
block             881 drivers/md/persistent-data/dm-array.c 	unlock_ablock(wi->info, block);
block             907 drivers/md/persistent-data/dm-array.c 	if (c->block)
block             908 drivers/md/persistent-data/dm-array.c 		unlock_ablock(c->info, c->block);
block             910 drivers/md/persistent-data/dm-array.c 	c->block = NULL;
block             920 drivers/md/persistent-data/dm-array.c 		r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab);
block             949 drivers/md/persistent-data/dm-array.c 	if (c->block) {
block             950 drivers/md/persistent-data/dm-array.c 		unlock_ablock(c->info, c->block);
block             960 drivers/md/persistent-data/dm-array.c 	if (!c->block)
block             199 drivers/md/persistent-data/dm-array.h 	struct dm_block *block;
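
The dm-array entries above address a flat array through fixed-size array blocks: a global index divides into the array-block number used as the btree key (index / max_entries) and a slot inside that block. As a tiny standalone helper with illustrative names:

    #include <linux/math64.h>
    #include <linux/types.h>

    /* Split a flat array index into (array block, slot within that block). */
    static void demo_array_split_index(u64 index, u32 max_entries,
                                       u64 *ablock_index, u32 *slot)
    {
            /* div_u64_rem() avoids open-coded 64-bit division on 32-bit builds. */
            *ablock_index = div_u64_rem(index, max_entries, slot);
    }
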
block             138 drivers/md/persistent-data/dm-btree-remove.c 	struct dm_block *block;
block             153 drivers/md/persistent-data/dm-btree-remove.c 			       &result->block, &inc);
block             157 drivers/md/persistent-data/dm-btree-remove.c 	result->n = dm_block_data(result->block);
block             163 drivers/md/persistent-data/dm-btree-remove.c 		cpu_to_le64(dm_block_location(result->block));
block             170 drivers/md/persistent-data/dm-btree-remove.c 	dm_tm_unlock(info->tm, c->block);
block             226 drivers/md/persistent-data/dm-btree-remove.c 		dm_tm_dec(info->tm, dm_block_location(r->block));
block             293 drivers/md/persistent-data/dm-btree-remove.c 	dm_tm_dec(info->tm, dm_block_location(c->block));
block             172 drivers/md/persistent-data/dm-btree-spine.c 	struct dm_block *block;
block             175 drivers/md/persistent-data/dm-btree-spine.c 	block = s->nodes[s->count - 1];
block             177 drivers/md/persistent-data/dm-btree-spine.c 	return dm_block_data(block);
block             344 drivers/md/persistent-data/dm-btree.c static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
block             352 drivers/md/persistent-data/dm-btree.c 		r = ro_step(s, block);
block             364 drivers/md/persistent-data/dm-btree.c 			block = value64(ro_node(s), i);
block             769 drivers/md/persistent-data/dm-btree.c 	dm_block_t block = root;
block             778 drivers/md/persistent-data/dm-btree.c 		r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
block             802 drivers/md/persistent-data/dm-btree.c 			block = value64(n, index);
block             805 drivers/md/persistent-data/dm-btree.c 	r = btree_insert_raw(&spine, block, &info->value_type,
block             868 drivers/md/persistent-data/dm-btree.c static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
block             875 drivers/md/persistent-data/dm-btree.c 		r = ro_step(s, block);
block             893 drivers/md/persistent-data/dm-btree.c 				block = value64(ro_node(s), i);
block             895 drivers/md/persistent-data/dm-btree.c 				block = value64(ro_node(s), 0);
block             901 drivers/md/persistent-data/dm-btree.c 		*next_block = block;
block             949 drivers/md/persistent-data/dm-btree.c static int walk_node(struct dm_btree_info *info, dm_block_t block,
block             959 drivers/md/persistent-data/dm-btree.c 	r = bn_read_lock(info, block, &node);
block             586 drivers/md/persistent-data/dm-space-map-common.c 	struct dm_block *block;
block             589 drivers/md/persistent-data/dm-space-map-common.c 			    &index_validator, &block);
block             593 drivers/md/persistent-data/dm-space-map-common.c 	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
block             594 drivers/md/persistent-data/dm-space-map-common.c 	dm_tm_unlock(ll->tm, block);
block              92 drivers/md/persistent-data/dm-space-map-metadata.c 	dm_block_t block;
block             133 drivers/md/persistent-data/dm-space-map-metadata.c 	bop->block = b;
block             149 drivers/md/persistent-data/dm-space-map-metadata.c 	result->block = bop->block;
block             200 drivers/md/persistent-data/dm-space-map-metadata.c 		r = sm_ll_inc(&smm->ll, op->block, &ev);
block             204 drivers/md/persistent-data/dm-space-map-metadata.c 		r = sm_ll_dec(&smm->ll, op->block, &ev);
block             317 drivers/md/persistent-data/dm-space-map-metadata.c 		if (op->block != b)
block             358 drivers/md/persistent-data/dm-space-map-metadata.c 		if (op->block != b)
block             711 drivers/md/raid5-cache.c 	struct r5l_meta_block *block;
block             719 drivers/md/raid5-cache.c 	block = page_address(io->meta_page);
block             720 drivers/md/raid5-cache.c 	block->meta_size = cpu_to_le32(io->meta_offset);
block             721 drivers/md/raid5-cache.c 	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
block             722 drivers/md/raid5-cache.c 	block->checksum = cpu_to_le32(crc);
block             770 drivers/md/raid5-cache.c 	struct r5l_meta_block *block;
block             784 drivers/md/raid5-cache.c 	block = page_address(io->meta_page);
block             785 drivers/md/raid5-cache.c 	clear_page(block);
block             786 drivers/md/raid5-cache.c 	block->magic = cpu_to_le32(R5LOG_MAGIC);
block             787 drivers/md/raid5-cache.c 	block->version = R5LOG_VERSION;
block             788 drivers/md/raid5-cache.c 	block->seq = cpu_to_le64(log->seq);
block             789 drivers/md/raid5-cache.c 	block->position = cpu_to_le64(log->log_start);
block             747 drivers/media/cec/cec-adap.c 			struct cec_fh *fh, bool block)
block             782 drivers/media/cec/cec-adap.c 			!block ? ", nb" : "");
block             785 drivers/media/cec/cec-adap.c 			__func__, msg->len, msg->msg, !block ? " (nb)" : "");
block             889 drivers/media/cec/cec-adap.c 	data->blocking = block;
block             903 drivers/media/cec/cec-adap.c 	if (!block)
block             927 drivers/media/cec/cec-adap.c 		     bool block)
block             932 drivers/media/cec/cec-adap.c 	ret = cec_transmit_msg_fh(adap, msg, NULL, block);
block            1528 drivers/media/cec/cec-adap.c static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
block            1541 drivers/media/cec/cec-adap.c 	} else if (block) {
block            1552 drivers/media/cec/cec-adap.c void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
block            1602 drivers/media/cec/cec-adap.c 		cec_claim_log_addrs(adap, block);
block            1605 drivers/media/cec/cec-adap.c void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
block            1611 drivers/media/cec/cec-adap.c 	__cec_s_phys_addr(adap, phys_addr, block);
block            1653 drivers/media/cec/cec-adap.c 		      struct cec_log_addrs *log_addrs, bool block)
block            1805 drivers/media/cec/cec-adap.c 		cec_claim_log_addrs(adap, block);
block            1810 drivers/media/cec/cec-adap.c 		    struct cec_log_addrs *log_addrs, bool block)
block            1815 drivers/media/cec/cec-adap.c 	err = __cec_s_log_addrs(adap, log_addrs, block);
block             122 drivers/media/cec/cec-api.c 				 bool block, __u16 __user *parg)
block             139 drivers/media/cec/cec-api.c 		__cec_s_phys_addr(adap, phys_addr, block);
block             162 drivers/media/cec/cec-api.c 				 bool block, struct cec_log_addrs __user *parg)
block             178 drivers/media/cec/cec-api.c 		err = __cec_s_log_addrs(adap, &log_addrs, block);
block             191 drivers/media/cec/cec-api.c 			 bool block, struct cec_msg __user *parg)
block             209 drivers/media/cec/cec-api.c 		err = cec_transmit_msg_fh(adap, &msg, fh, block);
block             219 drivers/media/cec/cec-api.c static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
block             247 drivers/media/cec/cec-api.c 		if (!block)
block             270 drivers/media/cec/cec-api.c 			bool block, struct cec_msg __user *parg)
block             278 drivers/media/cec/cec-api.c 	err = cec_receive_msg(fh, &msg, block);
block             288 drivers/media/cec/cec-api.c 			bool block, struct cec_event __user *parg)
block             297 drivers/media/cec/cec-api.c 	while (!fh->total_queued_events && block) {
block             487 drivers/media/cec/cec-api.c 	bool block = !(filp->f_flags & O_NONBLOCK);
block             501 drivers/media/cec/cec-api.c 		return cec_adap_s_phys_addr(adap, fh, block, parg);
block             507 drivers/media/cec/cec-api.c 		return cec_adap_s_log_addrs(adap, fh, block, parg);
block             510 drivers/media/cec/cec-api.c 		return cec_transmit(adap, fh, block, parg);
block             513 drivers/media/cec/cec-api.c 		return cec_receive(adap, fh, block, parg);
block             516 drivers/media/cec/cec-api.c 		return cec_dqevent(adap, fh, block, parg);
block              40 drivers/media/cec/cec-priv.h void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block);
block              42 drivers/media/cec/cec-priv.h 		      struct cec_log_addrs *log_addrs, bool block);
block              44 drivers/media/cec/cec-priv.h 			struct cec_fh *fh, bool block);
block             666 drivers/media/dvb-frontends/dib9000.c 	u16 *block;
block             669 drivers/media/dvb-frontends/dib9000.c 		block = state->platform.risc.message_cache[i];
block             670 drivers/media/dvb-frontends/dib9000.c 		if (*block == 0) {
block             671 drivers/media/dvb-frontends/dib9000.c 			size = dib9000_mbx_read(state, block, 1, attr);
block             675 drivers/media/dvb-frontends/dib9000.c 			switch (*block >> 8) {
block             677 drivers/media/dvb-frontends/dib9000.c 				dib9000_risc_debug_buf(state, block + 1, size);	/* debug-messages are going to be printed right away */
block             678 drivers/media/dvb-frontends/dib9000.c 				*block = 0;	/* free the block */
block             682 drivers/media/dvb-frontends/dib9000.c 				dib9000_risc_data_process(state, block + 1, size);
block             683 drivers/media/dvb-frontends/dib9000.c 				*block = 0;
block             731 drivers/media/dvb-frontends/dib9000.c 	u16 *block;
block             738 drivers/media/dvb-frontends/dib9000.c 			block = state->platform.risc.message_cache[i];
block             739 drivers/media/dvb-frontends/dib9000.c 			if ((*block >> 8) == id) {
block             740 drivers/media/dvb-frontends/dib9000.c 				*size = (*block & 0xff) - 1;
block             741 drivers/media/dvb-frontends/dib9000.c 				memcpy(msg, block + 1, (*size) * 2);
block             742 drivers/media/dvb-frontends/dib9000.c 				*block = 0;	/* free the block */
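The dib9000.c hits above treat each message_cache slot as a u16 header word: 0 marks a free slot, the high byte carries the message id, the low byte the length (payload words + 1), and the payload follows at block + 1. A minimal user-space sketch of that slot layout, assuming that interpretation (slot_fetch, SLOT_WORDS and the sample id are illustrative, not the driver's API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SLOT_WORDS 16	/* illustrative slot size, not the driver's value */

/* Pull a message with the given id out of a cache slot laid out as the
 * dib9000 hits above suggest: word 0 = (id << 8) | (payload words + 1),
 * payload at slot[1..], 0 marking a free slot. */
static int slot_fetch(uint16_t *slot, uint8_t id, uint16_t *msg, int *size)
{
	if ((*slot >> 8) != id)
		return -1;		/* not the message we want */

	*size = (*slot & 0xff) - 1;	/* payload length in 16-bit words */
	memcpy(msg, slot + 1, *size * 2);
	*slot = 0;			/* free the slot */
	return 0;
}

int main(void)
{
	uint16_t slot[SLOT_WORDS] = { (0x12 << 8) | 3, 0xaaaa, 0xbbbb };
	uint16_t msg[SLOT_WORDS];
	int size;

	if (slot_fetch(slot, 0x12, msg, &size) == 0)
		printf("id 0x12: %d words, first 0x%04x\n", size, msg[0]);
	return 0;
}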
block             225 drivers/media/i2c/adv7511-v4l2.c 	data.block[0] = length;
block             230 drivers/media/i2c/adv7511-v4l2.c 	memcpy(values, data.block + 1, length);
block             353 drivers/media/i2c/adv7842.c 	data.block[0] = length;
block             354 drivers/media/i2c/adv7842.c 	memcpy(data.block + 1, values, length);
block             257 drivers/media/platform/omap3isp/isppreview.c 		const __u32 *block = cfa->table[order[i]];
block             260 drivers/media/platform/omap3isp/isppreview.c 			isp_reg_writel(isp, block[j], OMAP3_ISP_IOMEM_PREV,
block              56 drivers/media/platform/vicodec/codec-fwht.c 	s16 block[8 * 8];
block              57 drivers/media/platform/vicodec/codec-fwht.c 	s16 *wp = block;
block              74 drivers/media/platform/vicodec/codec-fwht.c 	for (i = 63; i >= 0 && !block[zigzag[i]]; i--)
block              88 drivers/media/platform/vicodec/codec-fwht.c 		while ((tmp = block[zigzag[i]]) == 0 && cnt < 14) {
block             120 drivers/media/platform/vicodec/codec-fwht.c 	s16 block[8 * 8 + 16];
block             121 drivers/media/platform/vicodec/codec-fwht.c 	s16 *wp = block;
block             160 drivers/media/platform/vicodec/codec-fwht.c 	wp = block;
block             247 drivers/media/platform/vicodec/codec-fwht.c static void noinline_for_stack fwht(const u8 *block, s16 *output_block,
block             253 drivers/media/platform/vicodec/codec-fwht.c 	const u8 *tmp = block;
block             382 drivers/media/platform/vicodec/codec-fwht.c fwht16(const s16 *block, s16 *output_block, int stride, int intra)
block             386 drivers/media/platform/vicodec/codec-fwht.c 	const s16 *tmp = block;
block             466 drivers/media/platform/vicodec/codec-fwht.c ifwht(const s16 *block, s16 *output_block, int intra)
block             474 drivers/media/platform/vicodec/codec-fwht.c 	const s16 *tmp = block;
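codec-fwht.c above scans an 8x8 coefficient block in zigzag order to find the last non-zero coefficient before run-length coding it. A minimal user-space sketch of that trimming step; the zigzag table here is the common JPEG-style order and only stands in for the driver's own table:

#include <stdio.h>

typedef short s16;

/* JPEG-style zigzag order; a stand-in for the driver's own table. */
static const int zigzag[64] = {
	 0,  1,  8, 16,  9,  2,  3, 10,
	17, 24, 32, 25, 18, 11,  4,  5,
	12, 19, 26, 33, 40, 48, 41, 34,
	27, 20, 13,  6,  7, 14, 21, 28,
	35, 42, 49, 56, 57, 50, 43, 36,
	29, 22, 15, 23, 30, 37, 44, 51,
	58, 59, 52, 45, 38, 31, 39, 46,
	53, 60, 61, 54, 47, 55, 62, 63,
};

/* Index of the last non-zero coefficient in zigzag order, or -1 if the
 * whole 8x8 block is zero. */
static int last_nonzero(const s16 block[64])
{
	int i;

	for (i = 63; i >= 0 && !block[zigzag[i]]; i--)
		;
	return i;
}

int main(void)
{
	s16 block[64] = { 0 };

	block[0] = 100;	/* DC coefficient */
	block[9] = -3;	/* one AC coefficient, zigzag index 4 */
	printf("last non-zero zigzag index: %d\n", last_nonzero(block));
	return 0;
}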
block             100 drivers/media/platform/vivid/vivid-radio-rx.c 				rds.block |= V4L2_RDS_BLOCK_CORRECTED;
block             103 drivers/media/platform/vivid/vivid-radio-rx.c 				rds.block |= V4L2_RDS_BLOCK_INVALID;
block             106 drivers/media/platform/vivid/vivid-radio-rx.c 				rds.block |= V4L2_RDS_BLOCK_ERROR;
block              84 drivers/media/platform/vivid/vivid-radio-tx.c 		if ((rds.block & V4L2_RDS_BLOCK_MSK) == V4L2_RDS_BLOCK_INVALID ||
block              85 drivers/media/platform/vivid/vivid-radio-tx.c 		    (rds.block & V4L2_RDS_BLOCK_ERROR))
block              87 drivers/media/platform/vivid/vivid-radio-tx.c 		rds.block &= V4L2_RDS_BLOCK_MSK;
block              55 drivers/media/platform/vivid/vivid-rds-gen.c 		data[0].block = V4L2_RDS_BLOCK_A | (V4L2_RDS_BLOCK_A << 3);
block              58 drivers/media/platform/vivid/vivid-rds-gen.c 		data[1].block = V4L2_RDS_BLOCK_B | (V4L2_RDS_BLOCK_B << 3);
block              59 drivers/media/platform/vivid/vivid-rds-gen.c 		data[3].block = V4L2_RDS_BLOCK_D | (V4L2_RDS_BLOCK_D << 3);
block              71 drivers/media/platform/vivid/vivid-rds-gen.c 			data[2].block = V4L2_RDS_BLOCK_C_ALT | (V4L2_RDS_BLOCK_C_ALT << 3);
block              82 drivers/media/platform/vivid/vivid-rds-gen.c 			data[2].block = V4L2_RDS_BLOCK_C | (V4L2_RDS_BLOCK_C << 3);
block             108 drivers/media/platform/vivid/vivid-rds-gen.c 			data[2].block = V4L2_RDS_BLOCK_C | (V4L2_RDS_BLOCK_C << 3);
block             118 drivers/media/platform/vivid/vivid-rds-gen.c 			data[2].block = V4L2_RDS_BLOCK_C_ALT | (V4L2_RDS_BLOCK_C_ALT << 3);
block             175 drivers/media/radio/radio-wl1273.c 		status = rds.block;
block             181 drivers/media/radio/radio-wl1273.c 		rds.block = V4L2_RDS_BLOCK_MSK & status;
block             182 drivers/media/radio/radio-wl1273.c 		rds.block |= rds.block << 3;
block             186 drivers/media/radio/radio-wl1273.c 			rds.block |= V4L2_RDS_BLOCK_ERROR;
block             187 drivers/media/radio/radio-wl1273.c 			rds.block &= ~V4L2_RDS_BLOCK_CORRECTED;
block             189 drivers/media/radio/radio-wl1273.c 			rds.block &= ~V4L2_RDS_BLOCK_ERROR;
block             190 drivers/media/radio/radio-wl1273.c 			rds.block |= V4L2_RDS_BLOCK_CORRECTED;
block             521 drivers/media/usb/pwc/pwc-dec23.c 		const unsigned int *block;
block             560 drivers/media/usb/pwc/pwc-dec23.c 			block = pdec->table_subblock[rows];
block             562 drivers/media/usb/pwc/pwc-dec23.c 				pdec->temp_colors[i] += block[MulIdx[offset1][i]];
block             574 drivers/media/usb/pwc/pwc-dec23.c 			block = pdec->table_subblock[rows];
block             576 drivers/media/usb/pwc/pwc-dec23.c 				pdec->temp_colors[i] += block[MulIdx[offset1][i]];
block             123 drivers/media/usb/tm6000/tm6000-video.c 	unsigned int cmd, cpysize, pktsize, size, field, block, line, pos = 0;
block             176 drivers/media/usb/tm6000/tm6000-video.c 			block = (header >> 7) & 0xf;
block             209 drivers/media/usb/tm6000/tm6000-video.c 					linewidth + block * TM6000_URB_MSG_LEN;
block             856 drivers/media/usb/usbvision/usbvision-core.c 	unsigned int pixel_per_line, block;
block             903 drivers/media/usb/usbvision/usbvision-core.c 	for (block = 0; block < (pixel_per_line / sub_block_size); block++) {
block            1091 drivers/media/usb/usbvision/usbvision-core.c 		scratch_rm_old(usbvision, y_step[block % y_step_size] * sub_block_size);
block            1092 drivers/media/usb/usbvision/usbvision-core.c 		scratch_inc_extra_ptr(&y_ptr, y_step[(block + 2 * block_split) % y_step_size]
block            1094 drivers/media/usb/usbvision/usbvision-core.c 		scratch_inc_extra_ptr(&u_ptr, uv_step[block % uv_step_size]
block            1096 drivers/media/usb/usbvision/usbvision-core.c 		scratch_inc_extra_ptr(&v_ptr, uv_step[(block + 2 * block_split) % uv_step_size]
block             123 drivers/mfd/qcom-pm8xxx.c static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block)
block             128 drivers/mfd/qcom-pm8xxx.c 	ret = pm8xxx_read_block_irq(chip, block, &bits);
block             130 drivers/mfd/qcom-pm8xxx.c 		pr_err("Failed reading %d block ret=%d", block, ret);
block             134 drivers/mfd/qcom-pm8xxx.c 		pr_err("block bit set in master but no irqs: %d", block);
block             141 drivers/mfd/qcom-pm8xxx.c 			pmirq = block * 8 + i;
block             200 drivers/mfd/qcom-pm8xxx.c 				     int master, int block)
block             206 drivers/mfd/qcom-pm8xxx.c 			  PM8821_SSBI_ADDR_IRQ_ROOT(master, block), &bits);
block             208 drivers/mfd/qcom-pm8xxx.c 		pr_err("Reading block %d failed ret=%d", block, ret);
block             213 drivers/mfd/qcom-pm8xxx.c 	block += (master * PM8821_BLOCKS_PER_MASTER) - 1;
block             218 drivers/mfd/qcom-pm8xxx.c 			pmirq = block * 8 + i;
block             228 drivers/mfd/qcom-pm8xxx.c 	int block;
block             230 drivers/mfd/qcom-pm8xxx.c 	for (block = 1; block < 8; block++)
block             231 drivers/mfd/qcom-pm8xxx.c 		if (master_val & BIT(block))
block             232 drivers/mfd/qcom-pm8xxx.c 			pm8821_irq_block_handler(chip, master, block);
block             275 drivers/mfd/qcom-pm8xxx.c 	u8	block, config;
block             277 drivers/mfd/qcom-pm8xxx.c 	block = pmirq / 8;
block             280 drivers/mfd/qcom-pm8xxx.c 	pm8xxx_config_irq(chip, block, config);
block             287 drivers/mfd/qcom-pm8xxx.c 	u8	block, config;
block             289 drivers/mfd/qcom-pm8xxx.c 	block = pmirq / 8;
block             292 drivers/mfd/qcom-pm8xxx.c 	pm8xxx_config_irq(chip, block, config);
block             300 drivers/mfd/qcom-pm8xxx.c 	u8 block, config;
block             302 drivers/mfd/qcom-pm8xxx.c 	block = pmirq / 8;
block             322 drivers/mfd/qcom-pm8xxx.c 	return pm8xxx_config_irq(chip, block, config);
block             333 drivers/mfd/qcom-pm8xxx.c 	u8 block;
block             339 drivers/mfd/qcom-pm8xxx.c 	block = pmirq / 8;
block             343 drivers/mfd/qcom-pm8xxx.c 	rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
block             345 drivers/mfd/qcom-pm8xxx.c 		pr_err("Failed Selecting Block %d rc=%d\n", block, rc);
block             409 drivers/mfd/qcom-pm8xxx.c 	u8 block, master;
block             412 drivers/mfd/qcom-pm8xxx.c 	block = pmirq / 8;
block             413 drivers/mfd/qcom-pm8xxx.c 	master = block / PM8821_BLOCKS_PER_MASTER;
block             415 drivers/mfd/qcom-pm8xxx.c 	block %= PM8821_BLOCKS_PER_MASTER;
block             418 drivers/mfd/qcom-pm8xxx.c 				PM8821_SSBI_ADDR_IRQ_MASK(master, block),
block             426 drivers/mfd/qcom-pm8xxx.c 				PM8821_SSBI_ADDR_IRQ_CLEAR(master, block),
block             437 drivers/mfd/qcom-pm8xxx.c 	u8 block, master;
block             439 drivers/mfd/qcom-pm8xxx.c 	block = pmirq / 8;
block             440 drivers/mfd/qcom-pm8xxx.c 	master = block / PM8821_BLOCKS_PER_MASTER;
block             442 drivers/mfd/qcom-pm8xxx.c 	block %= PM8821_BLOCKS_PER_MASTER;
block             445 drivers/mfd/qcom-pm8xxx.c 				PM8821_SSBI_ADDR_IRQ_MASK(master, block),
block             458 drivers/mfd/qcom-pm8xxx.c 	u8 block, irq_bit, master;
block             461 drivers/mfd/qcom-pm8xxx.c 	block = pmirq / 8;
block             462 drivers/mfd/qcom-pm8xxx.c 	master = block / PM8821_BLOCKS_PER_MASTER;
block             464 drivers/mfd/qcom-pm8xxx.c 	block %= PM8821_BLOCKS_PER_MASTER;
block             467 drivers/mfd/qcom-pm8xxx.c 		PM8821_SSBI_ADDR_IRQ_RT_STATUS(master, block), &bits);
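The qcom-pm8xxx.c hits above keep decomposing a flat PMIC interrupt number into an 8-interrupt block, a bit within it and, for pm8821, a master index. A minimal sketch of that arithmetic; BLOCKS_PER_MASTER and the struct name are illustrative, not the driver's definitions:

#include <stdio.h>

#define BLOCKS_PER_MASTER 7	/* illustrative; the driver has its own constant */

struct irq_coord {
	unsigned int master;	/* which master the block belongs to */
	unsigned int block;	/* block index within that master */
	unsigned int bit;	/* bit within the 8-interrupt block */
};

/* Mirror the "block = pmirq / 8; master = block / BLOCKS_PER_MASTER;
 * block %= BLOCKS_PER_MASTER" pattern from the listing above. */
static struct irq_coord decompose(unsigned int pmirq)
{
	struct irq_coord c;
	unsigned int block = pmirq / 8;

	c.bit = pmirq % 8;
	c.master = block / BLOCKS_PER_MASTER;
	c.block = block % BLOCKS_PER_MASTER;
	return c;
}

int main(void)
{
	struct irq_coord c = decompose(61);

	printf("pmirq 61 -> master %u, block %u, bit %u\n",
	       c.master, c.block, c.bit);
	return 0;
}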
block             948 drivers/mfd/si476x-cmd.c 	report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A;
block             952 drivers/mfd/si476x-cmd.c 	report->rds[V4L2_RDS_BLOCK_B].block = V4L2_RDS_BLOCK_B;
block             956 drivers/mfd/si476x-cmd.c 	report->rds[V4L2_RDS_BLOCK_C].block = V4L2_RDS_BLOCK_C;
block             960 drivers/mfd/si476x-cmd.c 	report->rds[V4L2_RDS_BLOCK_D].block = V4L2_RDS_BLOCK_D;
block             266 drivers/mfd/stmpe.c int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, enum stmpe_block block)
block             290 drivers/mfd/stmpe.c 	af = variant->get_altfunc(stmpe, block);
block             401 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_GPIO,
block             408 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_GPIO,
block             515 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_GPIO,
block             520 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_TOUCHSCREEN,
block             525 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_ADC,
block             576 drivers/mfd/stmpe.c static int stmpe811_get_altfunc(struct stmpe *stmpe, enum stmpe_block block)
block             579 drivers/mfd/stmpe.c 	return block != STMPE_BLOCK_TOUCHSCREEN;
block             640 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_GPIO,
block             703 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_GPIO,
block             708 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_KEYPAD,
block             713 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_PWM,
block             802 drivers/mfd/stmpe.c static int stmpe1601_get_altfunc(struct stmpe *stmpe, enum stmpe_block block)
block             804 drivers/mfd/stmpe.c 	switch (block) {
block             870 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_GPIO,
block             875 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_KEYPAD,
block             987 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_GPIO,
block             992 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_KEYPAD,
block             997 drivers/mfd/stmpe.c 		.block	= STMPE_BLOCK_PWM,
block            1016 drivers/mfd/stmpe.c static int stmpe24xx_get_altfunc(struct stmpe *stmpe, enum stmpe_block block)
block            1018 drivers/mfd/stmpe.c 	switch (block) {
block            1311 drivers/mfd/stmpe.c 		struct stmpe_variant_block *block = &variant->blocks[i];
block            1313 drivers/mfd/stmpe.c 		if (!(platform_blocks & block->block))
block            1316 drivers/mfd/stmpe.c 		for (j = 0; j < block->cell->num_resources; j++) {
block            1318 drivers/mfd/stmpe.c 				(struct resource *) &block->cell->resources[j];
block            1322 drivers/mfd/stmpe.c 				res->start = res->end = block->irq + j;
block            1325 drivers/mfd/stmpe.c 		platform_blocks &= ~block->block;
block            1326 drivers/mfd/stmpe.c 		ret = stmpe_add_device(stmpe, block->cell);
block              43 drivers/mfd/stmpe.h 	enum stmpe_block	block;
block              74 drivers/mfd/stmpe.h 	int (*get_altfunc)(struct stmpe *stmpe, enum stmpe_block block);
block             287 drivers/mfd/tc3589x.c 	unsigned int blocks = tc3589x->pdata->block;
block             347 drivers/mfd/tc3589x.c 			pdata->block |= TC3589x_BLOCK_GPIO;
block             349 drivers/mfd/tc3589x.c 			pdata->block |= TC3589x_BLOCK_KEYPAD;
block              44 drivers/misc/atmel_tclib.c struct atmel_tc *atmel_tc_alloc(unsigned block)
block              54 drivers/misc/atmel_tclib.c 		if ((tc->pdev->dev.of_node && tc->id == block) ||
block              55 drivers/misc/atmel_tclib.c 		    (tc->pdev->id == block)) {
block              22 drivers/misc/cb710/debug.c static inline int allow_reg_read(unsigned block, unsigned offset, unsigned bits)
block              26 drivers/misc/cb710/debug.c 	return ((allow[block] >> offset) & mask) == mask;
block            1415 drivers/misc/mei/client.c int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
block            1438 drivers/misc/mei/client.c 	if (!block)
block             209 drivers/misc/mei/client.h int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
block             433 drivers/misc/mei/main.c 	bool block = (file->f_flags & O_NONBLOCK) == 0;
block             436 drivers/misc/mei/main.c 	rets = mei_cl_notify_get(cl, block, &notify_ev);
block              30 drivers/misc/sram-exec.c int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
block              34 drivers/misc/sram-exec.c 	unsigned long end = base + block->size;
block              55 drivers/misc/sram.c static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block,
block              61 drivers/misc/sram.c 					  NUMA_NO_NODE, block->label);
block              66 drivers/misc/sram.c 				block->size, NUMA_NO_NODE);
block              75 drivers/misc/sram.c static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
block              88 drivers/misc/sram.c 	part->battr.size = block->size;
block              93 drivers/misc/sram.c static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
block             100 drivers/misc/sram.c 	part->base = sram->virt_base + block->start;
block             102 drivers/misc/sram.c 	if (block->pool) {
block             103 drivers/misc/sram.c 		ret = sram_add_pool(sram, block, start, part);
block             107 drivers/misc/sram.c 	if (block->export) {
block             108 drivers/misc/sram.c 		ret = sram_add_export(sram, block, start, part);
block             112 drivers/misc/sram.c 	if (block->protect_exec) {
block             113 drivers/misc/sram.c 		ret = sram_check_protect_exec(sram, block, part);
block             117 drivers/misc/sram.c 		ret = sram_add_pool(sram, block, start, part);
block             160 drivers/misc/sram.c 	struct sram_reserve *rblocks, *block;
block             179 drivers/misc/sram.c 	block = &rblocks[0];
block             199 drivers/misc/sram.c 		block->start = child_res.start - res->start;
block             200 drivers/misc/sram.c 		block->size = resource_size(&child_res);
block             201 drivers/misc/sram.c 		list_add_tail(&block->list, &reserve_list);
block             204 drivers/misc/sram.c 			block->export = true;
block             207 drivers/misc/sram.c 			block->pool = true;
block             210 drivers/misc/sram.c 			block->protect_exec = true;
block             212 drivers/misc/sram.c 		if ((block->export || block->pool || block->protect_exec) &&
block             213 drivers/misc/sram.c 		    block->size) {
block             227 drivers/misc/sram.c 			block->label = devm_kstrdup(sram->dev,
block             229 drivers/misc/sram.c 			if (!block->label) {
block             235 drivers/misc/sram.c 				block->export ? "exported " : "", block->label,
block             236 drivers/misc/sram.c 				block->start, block->start + block->size);
block             239 drivers/misc/sram.c 				block->start, block->start + block->size);
block             242 drivers/misc/sram.c 		block++;
block             264 drivers/misc/sram.c 	list_for_each_entry(block, &reserve_list, list) {
block             266 drivers/misc/sram.c 		if (block->start < cur_start) {
block             269 drivers/misc/sram.c 				block->start, cur_start);
block             275 drivers/misc/sram.c 		if ((block->export || block->pool || block->protect_exec) &&
block             276 drivers/misc/sram.c 		    block->size) {
block             277 drivers/misc/sram.c 			ret = sram_add_partition(sram, block,
block             278 drivers/misc/sram.c 						 res->start + block->start);
block             286 drivers/misc/sram.c 		if (block->start == cur_start) {
block             287 drivers/misc/sram.c 			cur_start = block->start + block->size;
block             296 drivers/misc/sram.c 		cur_size = block->start - cur_start;
block             310 drivers/misc/sram.c 		cur_start = block->start + block->size;
block              39 drivers/misc/sram.h int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
block              44 drivers/misc/sram.h 					  struct sram_reserve *block,
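sram.c above walks a sorted list of reserved blocks and treats the space between cur_start and the next block->start as an exportable chunk. A minimal user-space sketch of that gap accounting over a region, with made-up offsets and sizes:

#include <stdio.h>

struct reserve {
	unsigned long start;	/* offset of the reserved block in the region */
	unsigned long size;	/* its length in bytes */
};

/* Walk the (sorted, non-overlapping) reserved blocks and report the free
 * gaps between them, the way the sram.c hits above advance cur_start. */
static void report_gaps(const struct reserve *blocks, int n,
			unsigned long region_size)
{
	unsigned long cur_start = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (blocks[i].start > cur_start)
			printf("free: 0x%lx-0x%lx\n",
			       cur_start, blocks[i].start);
		cur_start = blocks[i].start + blocks[i].size;
	}
	if (cur_start < region_size)
		printf("free: 0x%lx-0x%lx\n", cur_start, region_size);
}

int main(void)
{
	/* Illustrative layout: two reserved blocks in a 64 KiB region. */
	struct reserve blocks[] = {
		{ 0x1000, 0x0800 },
		{ 0x4000, 0x1000 },
	};

	report_gaps(blocks, 2, 0x10000);
	return 0;
}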
block            1025 drivers/mmc/host/bcm2835.c 	bool block, busy, data;
block            1029 drivers/mmc/host/bcm2835.c 	block = host->irq_block;
block            1040 drivers/mmc/host/bcm2835.c 	if (block)
block            2481 drivers/mtd/chips/cfi_cmdset_0001.c 	int block, status, i;
block            2490 drivers/mtd/chips/cfi_cmdset_0001.c 		for (block = 0; block < region->numblocks; block++){
block            2492 drivers/mtd/chips/cfi_cmdset_0001.c 			adr = region->offset + block * len;
block            2497 drivers/mtd/chips/cfi_cmdset_0001.c 				set_bit(block, region->lockmap);
block            2499 drivers/mtd/chips/cfi_cmdset_0001.c 				clear_bit(block, region->lockmap);
block            2581 drivers/mtd/chips/cfi_cmdset_0001.c 	int block, i;
block            2590 drivers/mtd/chips/cfi_cmdset_0001.c 		for_each_clear_bit(block, region->lockmap, region->numblocks) {
block            2592 drivers/mtd/chips/cfi_cmdset_0001.c 			adr = region->offset + block * len;
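cfi_cmdset_0001.c above records per-block lock state in a bitmap (set_bit/clear_bit while saving state, for_each_clear_bit when restoring). A minimal user-space analog using plain C bit operations instead of the kernel bitmap helpers; block count and names are illustrative:

#include <stdio.h>
#include <string.h>

#define NUM_BLOCKS 19
#define LONG_BITS  (8 * sizeof(unsigned long))
#define MAP_LONGS  ((NUM_BLOCKS + LONG_BITS - 1) / LONG_BITS)

static void map_set(unsigned long *map, unsigned int block)
{
	map[block / LONG_BITS] |= 1UL << (block % LONG_BITS);
}

static int map_test(const unsigned long *map, unsigned int block)
{
	return (map[block / LONG_BITS] >> (block % LONG_BITS)) & 1;
}

int main(void)
{
	unsigned long lockmap[MAP_LONGS];
	unsigned int block;

	memset(lockmap, 0, sizeof(lockmap));

	/* Save phase: record which blocks are currently locked. */
	map_set(lockmap, 3);
	map_set(lockmap, 17);

	/* Restore phase: visit every block that was *not* locked, the role
	 * for_each_clear_bit() plays in the listing above. */
	for (block = 0; block < NUM_BLOCKS; block++)
		if (!map_test(lockmap, block))
			printf("re-unlock block %u\n", block);
	return 0;
}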
block             978 drivers/mtd/devices/docg3.c 	int block = DOC_LAYOUT_BLOCK_BBT;
block             984 drivers/mtd/devices/docg3.c 		ret = doc_read_page_prepare(docg3, block, block + 1,
block             958 drivers/mtd/ftl.c 			      unsigned long block, char *buf)
block             960 drivers/mtd/ftl.c 	return ftl_read((void *)dev, buf, block, 1);
block             964 drivers/mtd/ftl.c 			      unsigned long block, char *buf)
block             966 drivers/mtd/ftl.c 	return ftl_write((void *)dev, buf, block, 1);
block             242 drivers/mtd/inftlcore.c 	int block, silly;
block             267 drivers/mtd/inftlcore.c 		for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
block             268 drivers/mtd/inftlcore.c 			if ((BlockMap[block] != BLOCK_NIL) ||
block             269 drivers/mtd/inftlcore.c 			    BlockDeleted[block])
block             273 drivers/mtd/inftlcore.c 					   + (block * SECTORSIZE), 16, &retlen,
block             284 drivers/mtd/inftlcore.c 				BlockMap[block] = thisEUN;
block             287 drivers/mtd/inftlcore.c 				BlockDeleted[block] = 1;
block             292 drivers/mtd/inftlcore.c 					block, thisEUN, status);
block             313 drivers/mtd/inftlcore.c 	for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
block             321 drivers/mtd/inftlcore.c 		if (BlockMap[block] == targetEUN || (pendingblock ==
block             322 drivers/mtd/inftlcore.c 		    (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) {
block             330 drivers/mtd/inftlcore.c 		if (BlockMap[block] == BLOCK_NIL)
block             334 drivers/mtd/inftlcore.c 			       (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
block             340 drivers/mtd/inftlcore.c 				       (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
block             351 drivers/mtd/inftlcore.c 			    (block * SECTORSIZE), SECTORSIZE, &retlen,
block             460 drivers/mtd/inftlcore.c static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
block             462 drivers/mtd/inftlcore.c 	unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE);
block             464 drivers/mtd/inftlcore.c 	unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1);
block             473 drivers/mtd/inftlcore.c 			inftl, block);
block             490 drivers/mtd/inftlcore.c 					block , writeEUN, status);
block             537 drivers/mtd/inftlcore.c 			thisEUN = INFTL_makefreeblock(inftl, block);
block             631 drivers/mtd/inftlcore.c 	int block, silly;
block             654 drivers/mtd/inftlcore.c 		for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) {
block             655 drivers/mtd/inftlcore.c 			if (BlockUsed[block] || BlockDeleted[block])
block             659 drivers/mtd/inftlcore.c 					   + (block * SECTORSIZE), 8 , &retlen,
block             670 drivers/mtd/inftlcore.c 				BlockUsed[block] = 1;
block             673 drivers/mtd/inftlcore.c 				BlockDeleted[block] = 1;
block             678 drivers/mtd/inftlcore.c 					block, thisEUN, status);
block             691 drivers/mtd/inftlcore.c 	for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++)
block             692 drivers/mtd/inftlcore.c 		if (BlockUsed[block])
block             745 drivers/mtd/inftlcore.c static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
block             747 drivers/mtd/inftlcore.c 	unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
block             748 drivers/mtd/inftlcore.c 	unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
block             756 drivers/mtd/inftlcore.c 		"block=%d)\n", inftl, block);
block             777 drivers/mtd/inftlcore.c 				block, thisEUN, status);
block             784 drivers/mtd/inftlcore.c 				block / (inftl->EraseSize / SECTORSIZE));
block             799 drivers/mtd/inftlcore.c 		INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE));
block             804 drivers/mtd/inftlcore.c static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
block             809 drivers/mtd/inftlcore.c 	unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
block             815 drivers/mtd/inftlcore.c 		"buffer=%p)\n", inftl, block, buffer);
block             823 drivers/mtd/inftlcore.c 		writeEUN = INFTL_findwriteunit(inftl, block);
block             846 drivers/mtd/inftlcore.c 		INFTL_deleteblock(inftl, block);
block             852 drivers/mtd/inftlcore.c static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
block             856 drivers/mtd/inftlcore.c 	unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
block             857 drivers/mtd/inftlcore.c 	unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
block             865 drivers/mtd/inftlcore.c 		"buffer=%p)\n", inftl, block, buffer);
block             886 drivers/mtd/inftlcore.c 				block, thisEUN, status);
block             893 drivers/mtd/inftlcore.c 				block / (inftl->EraseSize / SECTORSIZE));
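inftlcore.c above repeatedly maps a 512-byte sector number ("block") to a virtual unit chain and a byte offset inside the erase unit. A minimal sketch of that split, with an illustrative erase size rather than a real chip's geometry:

#include <stdio.h>

#define SECTORSIZE 512
#define ERASESIZE  (16 * 1024)	/* illustrative; real chips vary */

int main(void)
{
	unsigned long block = 100;	/* 512-byte sector number */

	/* Which virtual unit chain the sector lives in, and where inside
	 * the erase unit it sits - the same arithmetic the listing shows. */
	unsigned int  vuc      = block / (ERASESIZE / SECTORSIZE);
	unsigned long blockofs = (block * SECTORSIZE) & (ERASESIZE - 1);

	printf("sector %lu -> chain %u, offset 0x%lx\n", block, vuc, blockofs);
	return 0;
}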
block              35 drivers/mtd/inftlmount.c 	unsigned int i, block;
block              55 drivers/mtd/inftlmount.c 	for (block = 0; block < inftl->nb_blocks; block++) {
block              62 drivers/mtd/inftlmount.c 		ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
block              72 drivers/mtd/inftlmount.c 					block * inftl->EraseSize,
block              89 drivers/mtd/inftlmount.c 				     block * inftl->EraseSize + SECTORSIZE + 8,
block              94 drivers/mtd/inftlmount.c 				"(err %d)\n", block * inftl->EraseSize,
block             107 drivers/mtd/inftlmount.c 		mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
block             172 drivers/mtd/inftlmount.c 			block >>= mh->BlockMultiplierBits;
block             287 drivers/mtd/inftlmount.c 		inftl->PUtable[block] = BLOCK_RESERVED;
block             301 drivers/mtd/inftlmount.c 		inftl->MediaUnit = block;
block             368 drivers/mtd/inftlmount.c int INFTL_formatblock(struct INFTLrecord *inftl, int block)
block             376 drivers/mtd/inftlmount.c 	pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block);
block             384 drivers/mtd/inftlmount.c 	instr->addr = block * inftl->EraseSize;
block             396 drivers/mtd/inftlmount.c 				block);
block             415 drivers/mtd/inftlmount.c 	instr->addr = block * inftl->EraseSize + SECTORSIZE * 2;
block             436 drivers/mtd/inftlmount.c 	unsigned int block = first_block, block1;
block             442 drivers/mtd/inftlmount.c 		block1 = inftl->PUtable[block];
block             444 drivers/mtd/inftlmount.c 		printk(KERN_WARNING "INFTL: formatting block %d\n", block);
block             445 drivers/mtd/inftlmount.c 		if (INFTL_formatblock(inftl, block) < 0) {
block             449 drivers/mtd/inftlmount.c 			inftl->PUtable[block] = BLOCK_RESERVED;
block             451 drivers/mtd/inftlmount.c 			inftl->PUtable[block] = BLOCK_FREE;
block             455 drivers/mtd/inftlmount.c 		block = block1;
block             457 drivers/mtd/inftlmount.c 		if (block == BLOCK_NIL || block >= inftl->lastEUN)
block             509 drivers/mtd/inftlmount.c 	int logical, block, i;
block             516 drivers/mtd/inftlmount.c 		block = s->VUtable[logical];
block             517 drivers/mtd/inftlmount.c 		if (block >= s->nb_blocks)
block             519 drivers/mtd/inftlmount.c 		pr_debug("  LOGICAL %d --> %d ", logical, block);
block             521 drivers/mtd/inftlmount.c 			if (s->PUtable[block] == BLOCK_NIL)
block             523 drivers/mtd/inftlmount.c 			block = s->PUtable[block];
block             524 drivers/mtd/inftlmount.c 			pr_debug("%d ", block);
block             536 drivers/mtd/inftlmount.c 	unsigned int block, first_block, prev_block, last_block;
block             557 drivers/mtd/inftlmount.c 	logical_block = block = BLOCK_NIL;
block             583 drivers/mtd/inftlmount.c 		block = first_block;
block             588 drivers/mtd/inftlmount.c 			    (s->PUtable[block] != BLOCK_NOTEXPLORED)) {
block             593 drivers/mtd/inftlmount.c 			if (inftl_read_oob(mtd, block * s->EraseSize + 8,
block             595 drivers/mtd/inftlmount.c 			    inftl_read_oob(mtd, block * s->EraseSize +
block             606 drivers/mtd/inftlmount.c 			ANACtable[block] = h0.ANAC;
block             613 drivers/mtd/inftlmount.c 			if (s->PUtable[block] != BLOCK_NOTEXPLORED) {
block             617 drivers/mtd/inftlmount.c 						s->PUtable[last_block] = block;
block             626 drivers/mtd/inftlmount.c 					"mark 0x%x?\n", block, first_block,
block             640 drivers/mtd/inftlmount.c 				s->PUtable[block] = BLOCK_FREE;
block             651 drivers/mtd/inftlmount.c 						block, first_block);
block             672 drivers/mtd/inftlmount.c 			s->PUtable[block] = BLOCK_NIL;
block             674 drivers/mtd/inftlmount.c 				s->PUtable[last_block] = block;
block             675 drivers/mtd/inftlmount.c 			last_block = block;
block             676 drivers/mtd/inftlmount.c 			block = prev_block;
block             679 drivers/mtd/inftlmount.c 			if (block == BLOCK_NIL)
block             683 drivers/mtd/inftlmount.c 			if (block > s->lastEUN) {
block             685 drivers/mtd/inftlmount.c 					"block %d in chain %d?\n", block,
block             716 drivers/mtd/inftlmount.c 		block = s->VUtable[logical_block];
block             720 drivers/mtd/inftlmount.c 		if (block >= BLOCK_RESERVED)
block             723 drivers/mtd/inftlmount.c 		ANAC = ANACtable[block];
block             725 drivers/mtd/inftlmount.c 			if (s->PUtable[block] == BLOCK_NIL)
block             727 drivers/mtd/inftlmount.c 			if (s->PUtable[block] > s->lastEUN) {
block             730 drivers/mtd/inftlmount.c 					s->PUtable[block], logical_block);
block             731 drivers/mtd/inftlmount.c 				s->PUtable[block] = BLOCK_NIL;
block             734 drivers/mtd/inftlmount.c 			if (ANACtable[block] != ANAC) {
block             740 drivers/mtd/inftlmount.c 				s->VUtable[logical_block] = block;
block             746 drivers/mtd/inftlmount.c 			last_block = block;
block             747 drivers/mtd/inftlmount.c 			block = s->PUtable[block];
block             769 drivers/mtd/inftlmount.c 	for (block = s->firstEUN; block <= s->lastEUN; block++) {
block             770 drivers/mtd/inftlmount.c 		if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
block             772 drivers/mtd/inftlmount.c 				block);
block             773 drivers/mtd/inftlmount.c 			if (INFTL_formatblock(s, block) < 0)
block             774 drivers/mtd/inftlmount.c 				s->PUtable[block] = BLOCK_RESERVED;
block             776 drivers/mtd/inftlmount.c 				s->PUtable[block] = BLOCK_FREE;
block             778 drivers/mtd/inftlmount.c 		if (s->PUtable[block] == BLOCK_FREE) {
block             781 drivers/mtd/inftlmount.c 				s->LastFreeEUN = block;
block              18 drivers/mtd/maps/vmu-flash.c 	unsigned int block;		/* Which block was cached */
block             211 drivers/mtd/maps/vmu-flash.c 		pcache->block = num;
block             380 drivers/mtd/maps/vmu-flash.c 			(pcache->block == vblock->num)) {
block              69 drivers/mtd/mtd_blkdevs.c 	unsigned long block, nsect;
block              72 drivers/mtd/mtd_blkdevs.c 	block = blk_rq_pos(req) << 9 >> tr->blkshift;
block              87 drivers/mtd/mtd_blkdevs.c 		if (tr->discard(dev, block, nsect))
block              92 drivers/mtd/mtd_blkdevs.c 		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
block              93 drivers/mtd/mtd_blkdevs.c 			if (tr->readsect(dev, block, buf)) {
block             107 drivers/mtd/mtd_blkdevs.c 		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
block             108 drivers/mtd/mtd_blkdevs.c 			if (tr->writesect(dev, block, buf)) {
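mtd_blkdevs.c above converts a request's 512-byte sector position into device blocks with "blk_rq_pos(req) << 9 >> tr->blkshift" and then steps block by block. A minimal sketch of that conversion, assuming a 4 KiB device block size purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long sector   = 24;	/* request position in 512-byte sectors */
	unsigned int  blkshift = 12;	/* assumed 4 KiB device blocks */

	/* sectors -> bytes (<< 9) -> device blocks (>> blkshift), as in the
	 * mtd_blkdevs.c hit above. */
	unsigned long block = sector << 9 >> blkshift;

	printf("sector %lu -> block %lu (byte offset 0x%lx)\n",
	       sector, block, block << blkshift);
	return 0;
}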
block             224 drivers/mtd/mtdblock.c 			      unsigned long block, char *buf)
block             227 drivers/mtd/mtdblock.c 	return do_cached_read(mtdblk, block<<9, 512, buf);
block             231 drivers/mtd/mtdblock.c 			      unsigned long block, char *buf)
block             243 drivers/mtd/mtdblock.c 	return do_cached_write(mtdblk, block<<9, 512, buf);
block              16 drivers/mtd/mtdblock_ro.c 			      unsigned long block, char *buf)
block              20 drivers/mtd/mtdblock_ro.c 	if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf))
block              26 drivers/mtd/mtdblock_ro.c 			      unsigned long block, char *buf)
block              30 drivers/mtd/mtdblock_ro.c 	if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
block             563 drivers/mtd/mtdswap.c 				unsigned int *block)
block             593 drivers/mtd/mtdswap.c 	*block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
block             597 drivers/mtd/mtdswap.c 	d->revmap[*block] = page;
block             735 drivers/mtd/mtdswap.c 	unsigned int i, block, eblk_base, newblock;
block             745 drivers/mtd/mtdswap.c 		block = eblk_base + i;
block             746 drivers/mtd/mtdswap.c 		if (d->revmap[block] == PAGE_UNDEF)
block             749 drivers/mtd/mtdswap.c 		ret = mtdswap_move_block(d, block, &newblock);
block             224 drivers/mtd/nand/onenand/onenand_base.c static int onenand_block_address(struct onenand_chip *this, int block)
block             227 drivers/mtd/nand/onenand/onenand_base.c 	if (block & this->density_mask)
block             228 drivers/mtd/nand/onenand/onenand_base.c 		return ONENAND_DDP_CHIP1 | (block ^ this->density_mask);
block             230 drivers/mtd/nand/onenand/onenand_base.c 	return block;
block             241 drivers/mtd/nand/onenand/onenand_base.c static int onenand_bufferram_address(struct onenand_chip *this, int block)
block             244 drivers/mtd/nand/onenand/onenand_base.c 	if (block & this->density_mask)
block             334 drivers/mtd/nand/onenand/onenand_base.c static loff_t flexonenand_addr(struct onenand_chip *this, int block)
block             339 drivers/mtd/nand/onenand/onenand_base.c 	if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
block             340 drivers/mtd/nand/onenand/onenand_base.c 		block -= this->density_mask;
block             346 drivers/mtd/nand/onenand/onenand_base.c 	ofs += (loff_t)block << (this->erase_shift - 1);
block             347 drivers/mtd/nand/onenand/onenand_base.c 	if (block > (boundary + 1))
block             348 drivers/mtd/nand/onenand/onenand_base.c 		ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
block             352 drivers/mtd/nand/onenand/onenand_base.c loff_t onenand_addr(struct onenand_chip *this, int block)
block             355 drivers/mtd/nand/onenand/onenand_base.c 		return (loff_t)block << this->erase_shift;
block             356 drivers/mtd/nand/onenand/onenand_base.c 	return flexonenand_addr(this, block);
block             401 drivers/mtd/nand/onenand/onenand_base.c 	int value, block, page;
block             409 drivers/mtd/nand/onenand/onenand_base.c 		block = -1;
block             415 drivers/mtd/nand/onenand/onenand_base.c 		block = addr * this->density_mask;
block             424 drivers/mtd/nand/onenand/onenand_base.c 		block = onenand_block(this, addr);
block             430 drivers/mtd/nand/onenand/onenand_base.c 		block = addr * this->density_mask;
block             435 drivers/mtd/nand/onenand/onenand_base.c 		block = onenand_block(this, addr);
block             437 drivers/mtd/nand/onenand/onenand_base.c 			page = (int) (addr - onenand_addr(this, block))>>\
block             443 drivers/mtd/nand/onenand/onenand_base.c 			block &= ~1;
block             446 drivers/mtd/nand/onenand/onenand_base.c 				block++;
block             456 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_bufferram_address(this, block);
block             469 drivers/mtd/nand/onenand/onenand_base.c 	if (block != -1) {
block             471 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_block_address(this, block);
block             475 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_bufferram_address(this, block);
block             876 drivers/mtd/nand/onenand/onenand_base.c 	int blockpage, block, page;
block             879 drivers/mtd/nand/onenand/onenand_base.c 	block = (int) (addr >> this->erase_shift) & ~1;
block             882 drivers/mtd/nand/onenand/onenand_base.c 		block++;
block             884 drivers/mtd/nand/onenand/onenand_base.c 	blockpage = (block << 7) | page;
block             923 drivers/mtd/nand/onenand/onenand_base.c 		int block = onenand_block(this, addr);
block             924 drivers/mtd/nand/onenand/onenand_base.c 		int value = onenand_bufferram_address(this, block);
block            2450 drivers/mtd/nand/onenand/onenand_base.c 	int block;
block            2453 drivers/mtd/nand/onenand/onenand_base.c 	block = onenand_block(this, ofs);
block            2455 drivers/mtd/nand/onenand/onenand_base.c                 bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
block            2504 drivers/mtd/nand/onenand/onenand_base.c 	int start, end, block, value, status;
block            2542 drivers/mtd/nand/onenand/onenand_base.c 	for (block = start; block < end + 1; block++) {
block            2544 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_block_address(this, block);
block            2547 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_bufferram_address(this, block);
block            2550 drivers/mtd/nand/onenand/onenand_base.c 		this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
block            2566 drivers/mtd/nand/onenand/onenand_base.c 				__func__, block, status);
block            2616 drivers/mtd/nand/onenand/onenand_base.c 	unsigned int value, block, status;
block            2620 drivers/mtd/nand/onenand/onenand_base.c 	for (block = 0; block < end; block++) {
block            2622 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_block_address(this, block);
block            2625 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_bufferram_address(this, block);
block            2628 drivers/mtd/nand/onenand/onenand_base.c 		this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
block            2634 drivers/mtd/nand/onenand/onenand_base.c 				__func__, block, status);
block            2700 drivers/mtd/nand/onenand/onenand_base.c 	int value, block, page;
block            2705 drivers/mtd/nand/onenand/onenand_base.c 		block = (int) (addr >> this->erase_shift);
block            2710 drivers/mtd/nand/onenand/onenand_base.c 		block = (int) (addr >> this->erase_shift);
block            2715 drivers/mtd/nand/onenand/onenand_base.c 			block &= ~1;
block            2718 drivers/mtd/nand/onenand/onenand_base.c 				block++;
block            2725 drivers/mtd/nand/onenand/onenand_base.c 	if (block != -1) {
block            2727 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_block_address(this, block);
block            2783 drivers/mtd/nand/onenand/onenand_base.c 	int block, value, status;
block            2802 drivers/mtd/nand/onenand/onenand_base.c 		block = (int) (to >> this->erase_shift);
block            2808 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_block_address(this, block);
block            2817 drivers/mtd/nand/onenand/onenand_base.c 		value = onenand_bufferram_address(this, block);
block            3517 drivers/mtd/nand/onenand/onenand_base.c 	int block;
block            3529 drivers/mtd/nand/onenand/onenand_base.c 	for (block = start; block <= end; block++) {
block            3530 drivers/mtd/nand/onenand/onenand_base.c 		addr = flexonenand_addr(this, block);
block            3548 drivers/mtd/nand/onenand/onenand_base.c 				__func__, block);
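onenand_base.c above turns a linear block number into a device block address: on dual-die (DDP) parts, blocks at or above density_mask select die 1 and have the mask xor'ed off. A minimal sketch of that mapping with illustrative constants; the real chip-select encoding lives in the OneNAND headers, not here:

#include <stdio.h>

#define DENSITY_MASK 0x200	/* illustrative: 512 blocks per die */
#define DDP_CHIP1    0x8000	/* illustrative stand-in for the chip-select bit */

/* Mirror the onenand_block_address() pattern from the listing: same block
 * number on die 0, chip-select bit plus die-local number on die 1. */
static unsigned int block_address(unsigned int block)
{
	if (block & DENSITY_MASK)
		return DDP_CHIP1 | (block ^ DENSITY_MASK);
	return block;
}

int main(void)
{
	printf("block 0x050 -> 0x%04x\n", block_address(0x050));
	printf("block 0x250 -> 0x%04x\n", block_address(0x250));
	return 0;
}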
block             148 drivers/mtd/nand/onenand/onenand_bbt.c 	int block;
block             152 drivers/mtd/nand/onenand/onenand_bbt.c 	block = (int) (onenand_block(this, offs) << 1);
block             153 drivers/mtd/nand/onenand/onenand_bbt.c 	res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
block             156 drivers/mtd/nand/onenand/onenand_bbt.c 		(unsigned int) offs, block >> 1, res);
block             730 drivers/mtd/nand/onenand/samsung.c 	unsigned int block, end;
block             735 drivers/mtd/nand/onenand/samsung.c 	for (block = 0; block < end; block++) {
block             736 drivers/mtd/nand/onenand/samsung.c 		unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
block             740 drivers/mtd/nand/onenand/samsung.c 			dev_err(dev, "block %d is write-protected!\n", block);
block            2007 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	unsigned int block;
block            2032 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	for (block = 0; block < search_area_size_in_blocks; block++) {
block            2034 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		dev_dbg(dev, "\tErasing block 0x%x\n", block);
block            2035 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		status = nand_erase_op(chip, block);
block            2069 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	unsigned int block;
block            2098 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	for (block = 0; block < block_count; block++) {
block            2103 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
block            2104 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		page = block << (chip->phys_erase_shift - chip->page_shift);
block            2105 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		byte = block <<  chip->phys_erase_shift;
block            2122 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 			dev_dbg(dev, "Transcribing mark in block %u\n", block);
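gpmi-nand.c above converts an erase-block index into a chip number, a page number and a byte offset purely with the chip's shift values. A minimal sketch of that shift arithmetic under assumed geometry (2 KiB pages, 128 KiB erase blocks, 128 MiB chips); the shift values are assumptions, not taken from any particular part:

#include <stdio.h>

int main(void)
{
	/* Assumed geometry: 2 KiB pages, 128 KiB erase blocks, 128 MiB chips. */
	unsigned int page_shift       = 11;
	unsigned int phys_erase_shift = 17;
	unsigned int chip_shift       = 27;
	unsigned int block            = 1500;

	/* Same shift arithmetic as the gpmi-nand.c hits above. */
	unsigned int  chipnr = block >> (chip_shift - phys_erase_shift);
	unsigned int  page   = block << (phys_erase_shift - page_shift);
	unsigned long byte   = (unsigned long)block << phys_erase_shift;

	printf("block %u -> chip %u, page %u, byte 0x%lx\n",
	       block, chipnr, page, byte);
	return 0;
}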
block              76 drivers/mtd/nand/raw/nand_bbt.c static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
block              78 drivers/mtd/nand/raw/nand_bbt.c 	uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
block              79 drivers/mtd/nand/raw/nand_bbt.c 	entry >>= (block & BBT_ENTRY_MASK) * 2;
block              83 drivers/mtd/nand/raw/nand_bbt.c static inline void bbt_mark_entry(struct nand_chip *chip, int block,
block              86 drivers/mtd/nand/raw/nand_bbt.c 	uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
block              87 drivers/mtd/nand/raw/nand_bbt.c 	chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
block             529 drivers/mtd/nand/raw/nand_bbt.c 	int startblock, block, dir;
block             558 drivers/mtd/nand/raw/nand_bbt.c 		for (block = 0; block < td->maxblocks; block++) {
block             560 drivers/mtd/nand/raw/nand_bbt.c 			int actblock = startblock + dir * block;
block             653 drivers/mtd/nand/raw/nand_bbt.c 		int block = startblock + dir * i;
block             656 drivers/mtd/nand/raw/nand_bbt.c 		switch (bbt_get_entry(this, block)) {
block             662 drivers/mtd/nand/raw/nand_bbt.c 		page = block << (this->bbt_erase_shift - this->page_shift);
block             666 drivers/mtd/nand/raw/nand_bbt.c 			return block;
block             686 drivers/mtd/nand/raw/nand_bbt.c 			       int chip, int block)
block             691 drivers/mtd/nand/raw/nand_bbt.c 	bbt_mark_entry(this, block, BBT_BLOCK_WORN);
block             693 drivers/mtd/nand/raw/nand_bbt.c 	to = (loff_t)block << this->bbt_erase_shift;
block             697 drivers/mtd/nand/raw/nand_bbt.c 			res, block);
block             752 drivers/mtd/nand/raw/nand_bbt.c 		int block;
block             754 drivers/mtd/nand/raw/nand_bbt.c 		block = get_bbt_block(this, td, md, chip);
block             755 drivers/mtd/nand/raw/nand_bbt.c 		if (block < 0) {
block             757 drivers/mtd/nand/raw/nand_bbt.c 			res = block;
block             765 drivers/mtd/nand/raw/nand_bbt.c 		page = block << (this->bbt_erase_shift - this->page_shift);
block             863 drivers/mtd/nand/raw/nand_bbt.c 			mark_bbt_block_bad(this, td, chip, block);
block             873 drivers/mtd/nand/raw/nand_bbt.c 			mark_bbt_block_bad(this, td, chip, block);
block            1101 drivers/mtd/nand/raw/nand_bbt.c 	int i, j, chips, block, nrblocks, update;
block            1118 drivers/mtd/nand/raw/nand_bbt.c 			block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
block            1119 drivers/mtd/nand/raw/nand_bbt.c 			oldval = bbt_get_entry(this, block);
block            1120 drivers/mtd/nand/raw/nand_bbt.c 			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
block            1123 drivers/mtd/nand/raw/nand_bbt.c 				nand_update_bbt(this, (loff_t)block <<
block            1129 drivers/mtd/nand/raw/nand_bbt.c 			block = ((i + 1) * nrblocks) - td->maxblocks;
block            1131 drivers/mtd/nand/raw/nand_bbt.c 			block = i * nrblocks;
block            1133 drivers/mtd/nand/raw/nand_bbt.c 			oldval = bbt_get_entry(this, block);
block            1134 drivers/mtd/nand/raw/nand_bbt.c 			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
block            1137 drivers/mtd/nand/raw/nand_bbt.c 			block++;
block            1145 drivers/mtd/nand/raw/nand_bbt.c 			nand_update_bbt(this, (loff_t)(block - 1) <<
block            1403 drivers/mtd/nand/raw/nand_bbt.c 	int block;
block            1405 drivers/mtd/nand/raw/nand_bbt.c 	block = (int)(offs >> this->bbt_erase_shift);
block            1406 drivers/mtd/nand/raw/nand_bbt.c 	return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
block            1417 drivers/mtd/nand/raw/nand_bbt.c 	int block, res;
block            1419 drivers/mtd/nand/raw/nand_bbt.c 	block = (int)(offs >> this->bbt_erase_shift);
block            1420 drivers/mtd/nand/raw/nand_bbt.c 	res = bbt_get_entry(this, block);
block            1423 drivers/mtd/nand/raw/nand_bbt.c 		 (unsigned int)offs, block, res);
block            1443 drivers/mtd/nand/raw/nand_bbt.c 	int block, ret = 0;
block            1445 drivers/mtd/nand/raw/nand_bbt.c 	block = (int)(offs >> this->bbt_erase_shift);
block            1448 drivers/mtd/nand/raw/nand_bbt.c 	bbt_mark_entry(this, block, BBT_BLOCK_WORN);
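nand_bbt.c above packs the state of each erase block into two bits, four blocks per byte (bbt_get_entry/bbt_mark_entry). A minimal user-space sketch of that packing, assuming the usual four-entries-per-byte layout the extracted lines imply; the state names and values here are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SHIFT 2		/* 4 two-bit entries per byte */
#define ENTRY_MASK  0x03

#define BLOCK_GOOD     0x0	/* illustrative state encoding */
#define BLOCK_WORN     0x1
#define BLOCK_RESERVED 0x2
#define BLOCK_FACTORY  0x3

static uint8_t bbt_get(const uint8_t *bbt, int block)
{
	uint8_t entry = bbt[block >> ENTRY_SHIFT];

	entry >>= (block & ENTRY_MASK) * 2;
	return entry & ENTRY_MASK;
}

static void bbt_mark(uint8_t *bbt, int block, uint8_t mark)
{
	uint8_t msk = (mark & ENTRY_MASK) << ((block & ENTRY_MASK) * 2);

	bbt[block >> ENTRY_SHIFT] |= msk;
}

int main(void)
{
	uint8_t bbt[4];		/* covers 16 blocks */

	memset(bbt, 0, sizeof(bbt));
	bbt_mark(bbt, 5, BLOCK_WORN);
	bbt_mark(bbt, 6, BLOCK_RESERVED);
	printf("block 5: %u, block 6: %u, block 7: %u\n",
	       bbt_get(bbt, 5), bbt_get(bbt, 6), bbt_get(bbt, 7));
	return 0;
}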
block             238 drivers/mtd/nftlcore.c 	int block;
block             265 drivers/mtd/nftlcore.c 		for (block = 0; block < nftl->EraseSize / 512; block ++) {
block             267 drivers/mtd/nftlcore.c 				      (block * 512), 16 , &retlen,
block             269 drivers/mtd/nftlcore.c 			if (block == 2) {
block             282 drivers/mtd/nftlcore.c 			BlockLastState[block] = status;
block             286 drivers/mtd/nftlcore.c 				BlockFreeFound[block] = 1;
block             290 drivers/mtd/nftlcore.c 				if (!BlockFreeFound[block])
block             291 drivers/mtd/nftlcore.c 					BlockMap[block] = thisEUN;
block             296 drivers/mtd/nftlcore.c 					       thisVUC, block);
block             299 drivers/mtd/nftlcore.c 				if (!BlockFreeFound[block])
block             300 drivers/mtd/nftlcore.c 					BlockMap[block] = BLOCK_NIL;
block             305 drivers/mtd/nftlcore.c 					       thisVUC, block);
block             312 drivers/mtd/nftlcore.c 				       block, thisEUN, status);
block             333 drivers/mtd/nftlcore.c 		for (block = 0; block < nftl->EraseSize / 512 ; block++) {
block             334 drivers/mtd/nftlcore.c 			if (BlockLastState[block] != SECTOR_FREE &&
block             335 drivers/mtd/nftlcore.c 			    BlockMap[block] != BLOCK_NIL &&
block             336 drivers/mtd/nftlcore.c 			    BlockMap[block] != targetEUN) {
block             340 drivers/mtd/nftlcore.c 				      thisVUC, block, BlockLastState[block],
block             341 drivers/mtd/nftlcore.c 				      BlockMap[block],
block             342 drivers/mtd/nftlcore.c 				      BlockMap[block]== targetEUN ? "==" : "!=",
block             391 drivers/mtd/nftlcore.c 	for (block = 0; block < nftl->EraseSize / 512 ; block++) {
block             396 drivers/mtd/nftlcore.c 		if (BlockMap[block] == targetEUN ||
block             397 drivers/mtd/nftlcore.c 		    (pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
block             403 drivers/mtd/nftlcore.c 		if (BlockMap[block] == BLOCK_NIL)
block             407 drivers/mtd/nftlcore.c 			       (nftl->EraseSize * BlockMap[block]) + (block * 512),
block             413 drivers/mtd/nftlcore.c 				       (nftl->EraseSize * BlockMap[block]) + (block * 512),
block             424 drivers/mtd/nftlcore.c 			   (block * 512), 512, &retlen, movebuf, (char *)&oob);
block             522 drivers/mtd/nftlcore.c static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
block             525 drivers/mtd/nftlcore.c 	u16 thisVUC = block / (nftl->EraseSize / 512);
block             528 drivers/mtd/nftlcore.c 	unsigned long blockofs = (block * 512) & (nftl->EraseSize -1);
block             556 drivers/mtd/nftlcore.c 			      block , writeEUN, le16_to_cpu(bci.Status));
block             674 drivers/mtd/nftlcore.c static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
block             679 drivers/mtd/nftlcore.c 	unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
block             683 drivers/mtd/nftlcore.c 	writeEUN = NFTL_findwriteunit(nftl, block);
block             701 drivers/mtd/nftlcore.c static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
block             707 drivers/mtd/nftlcore.c 	u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)];
block             708 drivers/mtd/nftlcore.c 	unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
block             739 drivers/mtd/nftlcore.c 				       block, thisEUN, status);
block             745 drivers/mtd/nftlcore.c 				       block / (nftl->EraseSize / 512));
block              28 drivers/mtd/nftlmount.c 	unsigned int block, boot_record_count = 0;
block              48 drivers/mtd/nftlmount.c 	for (block = 0; block < nftl->nb_blocks; block++) {
block              53 drivers/mtd/nftlmount.c 		ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
block              62 drivers/mtd/nftlmount.c 				       block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
block              73 drivers/mtd/nftlmount.c 			       block * nftl->EraseSize, nftl->mbd.mtd->index);
block              79 drivers/mtd/nftlmount.c 		ret = nftl_read_oob(mtd, block * nftl->EraseSize +
block              84 drivers/mtd/nftlmount.c 			       block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
block              94 drivers/mtd/nftlmount.c 			       block * nftl->EraseSize, nftl->mbd.mtd->index,
block             100 drivers/mtd/nftlmount.c 		ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
block             104 drivers/mtd/nftlmount.c 			       block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
block             111 drivers/mtd/nftlmount.c 			       block * nftl->EraseSize, nftl->mbd.mtd->index);
block             123 drivers/mtd/nftlmount.c 				       nftl->MediaUnit * nftl->EraseSize, block * nftl->EraseSize);
block             132 drivers/mtd/nftlmount.c 				nftl->SpareMediaUnit = block;
block             135 drivers/mtd/nftlmount.c 			nftl->ReplUnitTable[block] = BLOCK_RESERVED;
block             214 drivers/mtd/nftlmount.c 		nftl->ReplUnitTable[block] = BLOCK_RESERVED;
block             219 drivers/mtd/nftlmount.c The new DiskOnChip driver already scanned the bad block table.  Just query it.
block             223 drivers/mtd/nftlmount.c 						block * nftl->EraseSize + i +
block             243 drivers/mtd/nftlmount.c 		nftl->MediaUnit = block;
block             305 drivers/mtd/nftlmount.c int NFTL_formatblock(struct NFTLrecord *nftl, int block)
block             314 drivers/mtd/nftlmount.c 	if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8,
block             329 drivers/mtd/nftlmount.c 	instr->addr = block * nftl->EraseSize;
block             332 drivers/mtd/nftlmount.c 		printk("Error while formatting block %d\n", block);
block             352 drivers/mtd/nftlmount.c 	if (nftl_write_oob(mtd, block * nftl->EraseSize + SECTORSIZE +
block             375 drivers/mtd/nftlmount.c 	unsigned int block, i, status;
block             381 drivers/mtd/nftlmount.c 	block = first_block;
block             385 drivers/mtd/nftlmount.c 					  block * nftl->EraseSize + i * SECTORSIZE,
block             396 drivers/mtd/nftlmount.c 				    check_free_sectors(nftl, block * nftl->EraseSize + i * SECTORSIZE,
block             400 drivers/mtd/nftlmount.c 					       i, block);
block             405 drivers/mtd/nftlmount.c 					nftl_write_oob(mtd, block *
block             417 drivers/mtd/nftlmount.c 		block = nftl->ReplUnitTable[block];
block             418 drivers/mtd/nftlmount.c 		if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
block             419 drivers/mtd/nftlmount.c 			printk("incorrect ReplUnitTable[] : %d\n", block);
block             420 drivers/mtd/nftlmount.c 		if (block == BLOCK_NIL || block >= nftl->nb_blocks)
block             428 drivers/mtd/nftlmount.c 	unsigned int length = 0, block = first_block;
block             439 drivers/mtd/nftlmount.c 		block = nftl->ReplUnitTable[block];
block             440 drivers/mtd/nftlmount.c 		if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
block             441 drivers/mtd/nftlmount.c 			printk("incorrect ReplUnitTable[] : %d\n", block);
block             442 drivers/mtd/nftlmount.c 		if (block == BLOCK_NIL || block >= nftl->nb_blocks)
block             460 drivers/mtd/nftlmount.c 	unsigned int block = first_block, block1;
block             465 drivers/mtd/nftlmount.c 		block1 = nftl->ReplUnitTable[block];
block             467 drivers/mtd/nftlmount.c 		printk("Formatting block %d\n", block);
block             468 drivers/mtd/nftlmount.c 		if (NFTL_formatblock(nftl, block) < 0) {
block             470 drivers/mtd/nftlmount.c 			nftl->ReplUnitTable[block] = BLOCK_RESERVED;
block             472 drivers/mtd/nftlmount.c 			nftl->ReplUnitTable[block] = BLOCK_FREE;
block             476 drivers/mtd/nftlmount.c 		block = block1;
block             478 drivers/mtd/nftlmount.c 		if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
block             479 drivers/mtd/nftlmount.c 			printk("incorrect ReplUnitTable[] : %d\n", block);
block             480 drivers/mtd/nftlmount.c 		if (block == BLOCK_NIL || block >= nftl->nb_blocks)
block             491 drivers/mtd/nftlmount.c static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
block             499 drivers/mtd/nftlmount.c 	if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
block             507 drivers/mtd/nftlmount.c 		if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
block             515 drivers/mtd/nftlmount.c 				   block * nftl->EraseSize + SECTORSIZE + 8, 8,
block             523 drivers/mtd/nftlmount.c 			if (check_free_sectors (nftl, block * nftl->EraseSize + i,
block             527 drivers/mtd/nftlmount.c 			if (nftl_read_oob(mtd, block * nftl->EraseSize + i,
block             552 drivers/mtd/nftlmount.c static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block)
block             558 drivers/mtd/nftlmount.c 	if (nftl_read_oob(mtd, block * nftl->EraseSize + 2 * SECTORSIZE + 8,
block             569 drivers/mtd/nftlmount.c 	unsigned int block, first_block, is_first_block;
block             592 drivers/mtd/nftlmount.c 			block = first_block;
block             599 drivers/mtd/nftlmount.c 						  block * s->EraseSize + 8, 8,
block             602 drivers/mtd/nftlmount.c 						  block * s->EraseSize +
block             605 drivers/mtd/nftlmount.c 					s->ReplUnitTable[block] = BLOCK_NIL;
block             621 drivers/mtd/nftlmount.c 						if (check_and_mark_free_block(s, block) < 0) {
block             623 drivers/mtd/nftlmount.c 							printk("Formatting block %d\n", block);
block             624 drivers/mtd/nftlmount.c 							if (NFTL_formatblock(s, block) < 0) {
block             626 drivers/mtd/nftlmount.c 								s->ReplUnitTable[block] = BLOCK_RESERVED;
block             628 drivers/mtd/nftlmount.c 								s->ReplUnitTable[block] = BLOCK_FREE;
block             632 drivers/mtd/nftlmount.c 							s->ReplUnitTable[block] = BLOCK_FREE;
block             640 drivers/mtd/nftlmount.c 						       block, first_block);
block             641 drivers/mtd/nftlmount.c 						s->ReplUnitTable[block] = BLOCK_NIL;
block             658 drivers/mtd/nftlmount.c 						       block, logical_block, first_logical_block);
block             667 drivers/mtd/nftlmount.c 						if (get_fold_mark(s, block) != FOLD_MARK_IN_PROGRESS ||
block             670 drivers/mtd/nftlmount.c 							       block);
block             676 drivers/mtd/nftlmount.c 							       block);
block             683 drivers/mtd/nftlmount.c 					s->ReplUnitTable[block] = BLOCK_NIL;
block             687 drivers/mtd/nftlmount.c 					       block, rep_block);
block             689 drivers/mtd/nftlmount.c 					s->ReplUnitTable[block] = BLOCK_NIL;
block             703 drivers/mtd/nftlmount.c 						s->ReplUnitTable[block] = rep_block;
block             707 drivers/mtd/nftlmount.c 						       block, rep_block);
block             710 drivers/mtd/nftlmount.c 						s->ReplUnitTable[block] = BLOCK_NIL;
block             715 drivers/mtd/nftlmount.c 					s->ReplUnitTable[block] = rep_block;
block             716 drivers/mtd/nftlmount.c 					block = rep_block;
block             770 drivers/mtd/nftlmount.c 	for (block = 0; block < s->nb_blocks; block++) {
block             771 drivers/mtd/nftlmount.c 		if (s->ReplUnitTable[block] == BLOCK_NOTEXPLORED) {
block             772 drivers/mtd/nftlmount.c 			printk("Unreferenced block %d, formatting it\n", block);
block             773 drivers/mtd/nftlmount.c 			if (NFTL_formatblock(s, block) < 0)
block             774 drivers/mtd/nftlmount.c 				s->ReplUnitTable[block] = BLOCK_RESERVED;
block             776 drivers/mtd/nftlmount.c 				s->ReplUnitTable[block] = BLOCK_FREE;
block             778 drivers/mtd/nftlmount.c 		if (s->ReplUnitTable[block] == BLOCK_FREE) {
block             780 drivers/mtd/nftlmount.c 			s->LastFreeEUN = block;
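
Several nftlmount.c excerpts walk a chain of erase units with block = ReplUnitTable[block], complaining about out-of-range entries and stopping on BLOCK_NIL. A hedged sketch of that traversal pattern, with the sentinel value and the table contents made up for illustration:

/* Sketch only: chain traversal in the style of the ReplUnitTable walks above.
 * BLOCK_NIL and the table contents are illustrative values, not the driver's. */
#include <stdio.h>

#define BLOCK_NIL 0xffffu

static void walk_chain(const unsigned short *repl, unsigned int nb_blocks,
		       unsigned int first_block)
{
	unsigned int block = first_block;
	unsigned int length = 0;

	for (;;) {
		printf("unit %u in chain\n", block);
		length++;

		block = repl[block];
		if (!(block == BLOCK_NIL || block < nb_blocks))
			printf("incorrect ReplUnitTable[] : %u\n", block);
		if (block == BLOCK_NIL || block >= nb_blocks)
			break;
	}
	printf("chain length %u\n", length);
}

int main(void)
{
	/* 0 -> 2 -> 3 -> NIL; block 1 is unused in this made-up table */
	unsigned short repl[4] = { 2, BLOCK_NIL, 3, BLOCK_NIL };

	walk_chain(repl, 4, 0);
	return 0;
}
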
block              88 drivers/mtd/rfd_ftl.c 	struct block *blocks;
block              95 drivers/mtd/rfd_ftl.c 	struct block *block = &part->blocks[block_no];
block              98 drivers/mtd/rfd_ftl.c 	block->offset = part->block_size * block_no;
block             101 drivers/mtd/rfd_ftl.c 		block->state = BLOCK_UNUSED;
block             105 drivers/mtd/rfd_ftl.c 	block->state = BLOCK_OK;
block             116 drivers/mtd/rfd_ftl.c 			block->free_sectors++;
block             139 drivers/mtd/rfd_ftl.c 		part->sector_map[entry] = block->offset +
block             142 drivers/mtd/rfd_ftl.c 		block->used_sectors++;
block             145 drivers/mtd/rfd_ftl.c 	if (block->free_sectors == part->data_sectors_per_block)
block             188 drivers/mtd/rfd_ftl.c 	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
block             271 drivers/mtd/rfd_ftl.c static int erase_block(struct partition *part, int block)
block             280 drivers/mtd/rfd_ftl.c 	erase->addr = part->blocks[block].offset;
block             283 drivers/mtd/rfd_ftl.c 	part->blocks[block].state = BLOCK_ERASING;
block             284 drivers/mtd/rfd_ftl.c 	part->blocks[block].free_sectors = 0;
block             291 drivers/mtd/rfd_ftl.c 		part->blocks[block].state = BLOCK_FAILED;
block             292 drivers/mtd/rfd_ftl.c 		part->blocks[block].free_sectors = 0;
block             293 drivers/mtd/rfd_ftl.c 		part->blocks[block].used_sectors = 0;
block             298 drivers/mtd/rfd_ftl.c 		part->blocks[block].state = BLOCK_ERASED;
block             299 drivers/mtd/rfd_ftl.c 		part->blocks[block].free_sectors = part->data_sectors_per_block;
block             300 drivers/mtd/rfd_ftl.c 		part->blocks[block].used_sectors = 0;
block             301 drivers/mtd/rfd_ftl.c 		part->blocks[block].erases++;
block             303 drivers/mtd/rfd_ftl.c 		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
block             310 drivers/mtd/rfd_ftl.c 			       part->mbd.mtd->name, part->blocks[block].offset);
block             311 drivers/mtd/rfd_ftl.c 			part->blocks[block].state = BLOCK_FAILED;
block             313 drivers/mtd/rfd_ftl.c 			part->blocks[block].state = BLOCK_OK;
block             412 drivers/mtd/rfd_ftl.c 	int block, best_block, score, old_sector_block;
block             425 drivers/mtd/rfd_ftl.c 	for (block=0; block<part->total_blocks; block++) {
block             428 drivers/mtd/rfd_ftl.c 		if (block == part->reserved_block)
block             436 drivers/mtd/rfd_ftl.c 		if (part->blocks[block].free_sectors)
block             439 drivers/mtd/rfd_ftl.c 		this_score = part->blocks[block].used_sectors;
block             441 drivers/mtd/rfd_ftl.c 		if (block == old_sector_block)
block             445 drivers/mtd/rfd_ftl.c 			if (part->blocks[block].used_sectors ==
block             450 drivers/mtd/rfd_ftl.c 		this_score += part->blocks[block].erases;
block             453 drivers/mtd/rfd_ftl.c 			best_block = block;
block             484 drivers/mtd/rfd_ftl.c 	int block, stop;
block             486 drivers/mtd/rfd_ftl.c 	block = part->current_block == -1 ?
block             488 drivers/mtd/rfd_ftl.c 	stop = block;
block             491 drivers/mtd/rfd_ftl.c 		if (part->blocks[block].free_sectors &&
block             492 drivers/mtd/rfd_ftl.c 				block != part->reserved_block)
block             493 drivers/mtd/rfd_ftl.c 			return block;
block             495 drivers/mtd/rfd_ftl.c 		if (part->blocks[block].state == BLOCK_UNUSED)
block             496 drivers/mtd/rfd_ftl.c 			erase_block(part, block);
block             498 drivers/mtd/rfd_ftl.c 		if (++block >= part->total_blocks)
block             499 drivers/mtd/rfd_ftl.c 			block = 0;
block             501 drivers/mtd/rfd_ftl.c 	} while (block != stop);
block             508 drivers/mtd/rfd_ftl.c 	int rc, block;
block             511 drivers/mtd/rfd_ftl.c 	block = find_free_block(part);
block             513 drivers/mtd/rfd_ftl.c 	if (block == -1) {
block             519 drivers/mtd/rfd_ftl.c 			block = find_free_block(part);
block             522 drivers/mtd/rfd_ftl.c 		if (block == -1) {
block             528 drivers/mtd/rfd_ftl.c 	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
block             538 drivers/mtd/rfd_ftl.c 				part->blocks[block].offset);
block             542 drivers/mtd/rfd_ftl.c 	part->current_block = block;
block             550 drivers/mtd/rfd_ftl.c 	int block, offset, rc;
block             555 drivers/mtd/rfd_ftl.c 	block = old_addr / part->block_size;
block             559 drivers/mtd/rfd_ftl.c 	addr = part->blocks[block].offset +
block             572 drivers/mtd/rfd_ftl.c 	if (block == part->current_block)
block             575 drivers/mtd/rfd_ftl.c 	part->blocks[block].used_sectors--;
block             577 drivers/mtd/rfd_ftl.c 	if (!part->blocks[block].used_sectors &&
block             578 drivers/mtd/rfd_ftl.c 	    !part->blocks[block].free_sectors)
block             579 drivers/mtd/rfd_ftl.c 		rc = erase_block(part, block);
block             585 drivers/mtd/rfd_ftl.c static int find_free_sector(const struct partition *part, const struct block *block)
block             589 drivers/mtd/rfd_ftl.c 	i = stop = part->data_sectors_per_block - block->free_sectors;
block             607 drivers/mtd/rfd_ftl.c 	struct block *block;
block             622 drivers/mtd/rfd_ftl.c 	block = &part->blocks[part->current_block];
block             624 drivers/mtd/rfd_ftl.c 	i = find_free_sector(part, block);
block             632 drivers/mtd/rfd_ftl.c 		block->offset;
block             651 drivers/mtd/rfd_ftl.c 	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
block             663 drivers/mtd/rfd_ftl.c 	block->used_sectors++;
block             664 drivers/mtd/rfd_ftl.c 	block->free_sectors--;
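
The rfd_ftl.c excerpts around the reclaim path score candidate blocks from their used-sector and erase counts while skipping the reserved block. The comparison itself is not visible in the listing, so the sketch below assumes the lowest score (cheapest block to move) wins, and it simplifies the early-out on blocks that still have free sectors; everything here is illustrative, not the driver's exact policy.

/* Sketch only: one plausible reading of the rfd_ftl.c reclaim scoring loop
 * above.  The direction of the comparison is not visible in the excerpts;
 * this assumes a lower score (fewer sectors to move, fewer erases) wins, and
 * it skips rather than bails out on blocks that still have free sectors. */
#include <stdio.h>
#include <limits.h>

struct blk {
	int used_sectors;
	int free_sectors;
	int erases;
};

static int pick_block(const struct blk *blocks, int total,
		      int reserved_block, int data_sectors_per_block)
{
	int block, best_block = -1, best_score = INT_MAX;

	for (block = 0; block < total; block++) {
		int this_score;

		if (block == reserved_block)
			continue;
		if (blocks[block].free_sectors)
			continue;	/* still has room, not a reclaim candidate */
		if (blocks[block].used_sectors == data_sectors_per_block)
			continue;	/* completely full: nothing gained by moving it */

		this_score = blocks[block].used_sectors + blocks[block].erases;
		if (this_score < best_score) {
			best_score = this_score;
			best_block = block;
		}
	}
	return best_block;
}

int main(void)
{
	struct blk blocks[3] = {
		{ .used_sectors = 4, .free_sectors = 0, .erases = 10 },
		{ .used_sectors = 2, .free_sectors = 0, .erases = 3 },
		{ .used_sectors = 8, .free_sectors = 0, .erases = 1 },
	};

	printf("reclaim candidate: block %d\n", pick_block(blocks, 3, -1, 8));
	return 0;
}
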
block             192 drivers/mtd/sm_ftl.c static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
block             196 drivers/mtd/sm_ftl.c 	WARN_ON(block >= ftl->zone_size);
block             199 drivers/mtd/sm_ftl.c 	if (block == -1)
block             202 drivers/mtd/sm_ftl.c 	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
block             207 drivers/mtd/sm_ftl.c 			    int *zone, int *block, int *boffset)
block             211 drivers/mtd/sm_ftl.c 	*block = do_div(offset, ftl->max_lba);
block             239 drivers/mtd/sm_ftl.c 			  int zone, int block, int boffset,
block             249 drivers/mtd/sm_ftl.c 	if (block == -1) {
block             269 drivers/mtd/sm_ftl.c 		if (zone == 0 && block == ftl->cis_block && boffset ==
block             280 drivers/mtd/sm_ftl.c 	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
block             285 drivers/mtd/sm_ftl.c 			block, zone, ret);
block             303 drivers/mtd/sm_ftl.c 			" as bad" , block, zone);
block             312 drivers/mtd/sm_ftl.c 			block, zone);
block             321 drivers/mtd/sm_ftl.c 			   int zone, int block, int boffset,
block             330 drivers/mtd/sm_ftl.c 	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
block             345 drivers/mtd/sm_ftl.c 	ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
block             351 drivers/mtd/sm_ftl.c 			block, zone, ret);
block             368 drivers/mtd/sm_ftl.c 			  int zone, int block, int lba,
block             405 drivers/mtd/sm_ftl.c 		if (!sm_write_sector(ftl, zone, block, boffset,
block             417 drivers/mtd/sm_ftl.c 			if (sm_erase_block(ftl, zone, block, 0))
block             423 drivers/mtd/sm_ftl.c 			sm_mark_block_bad(ftl, zone, block);
block             432 drivers/mtd/sm_ftl.c static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
block             446 drivers/mtd/sm_ftl.c 	sm_printk("marking block %d of zone %d as bad", block, zone);
block             452 drivers/mtd/sm_ftl.c 		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
block             459 drivers/mtd/sm_ftl.c static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
block             466 drivers/mtd/sm_ftl.c 	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
block             474 drivers/mtd/sm_ftl.c 	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
block             481 drivers/mtd/sm_ftl.c 							block, zone_num);
block             487 drivers/mtd/sm_ftl.c 			(const unsigned char *)&block, sizeof(block));
block             491 drivers/mtd/sm_ftl.c 	sm_mark_block_bad(ftl, zone_num, block);
block             496 drivers/mtd/sm_ftl.c static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
block             512 drivers/mtd/sm_ftl.c 		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
block             527 drivers/mtd/sm_ftl.c 		sm_erase_block(ftl, zone, block, 1);
block             675 drivers/mtd/sm_ftl.c 	int block, boffset;
block             680 drivers/mtd/sm_ftl.c 	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
block             682 drivers/mtd/sm_ftl.c 		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
block             698 drivers/mtd/sm_ftl.c 		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
block             709 drivers/mtd/sm_ftl.c 	ftl->cis_block = block;
block             722 drivers/mtd/sm_ftl.c 			block * ftl->block_size +
block             748 drivers/mtd/sm_ftl.c 	uint16_t block;
block             770 drivers/mtd/sm_ftl.c 	for (block = 0 ; block < ftl->zone_size ; block++) {
block             773 drivers/mtd/sm_ftl.c 		if (zone_num == 0 && block <= ftl->cis_block)
block             777 drivers/mtd/sm_ftl.c 		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
block             787 drivers/mtd/sm_ftl.c 				(unsigned char *)&block, 2);
block             796 drivers/mtd/sm_ftl.c 			dbg("PH %04d <-> <marked bad>", block);
block             807 drivers/mtd/sm_ftl.c 			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
block             815 drivers/mtd/sm_ftl.c 			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
block             816 drivers/mtd/sm_ftl.c 			zone->lba_to_phys_table[lba] = block;
block             822 drivers/mtd/sm_ftl.c 			lba, zone->lba_to_phys_table[lba], block, zone_num);
block             825 drivers/mtd/sm_ftl.c 		if (sm_check_block(ftl, zone_num, block))
block             831 drivers/mtd/sm_ftl.c 			zone->lba_to_phys_table[lba] = block;
block             840 drivers/mtd/sm_ftl.c 		sm_erase_block(ftl, zone_num, block, 1);
block             859 drivers/mtd/sm_ftl.c 					(unsigned char *)&block, 2);
block             861 drivers/mtd/sm_ftl.c 		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
block            1005 drivers/mtd/sm_ftl.c 	int zone_num, block, boffset;
block            1007 drivers/mtd/sm_ftl.c 	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
block            1018 drivers/mtd/sm_ftl.c 	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
block            1025 drivers/mtd/sm_ftl.c 	block = zone->lba_to_phys_table[block];
block            1027 drivers/mtd/sm_ftl.c 	if (block == -1) {
block            1032 drivers/mtd/sm_ftl.c 	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
block            1050 drivers/mtd/sm_ftl.c 	int error = 0, zone_num, block, boffset;
block            1053 drivers/mtd/sm_ftl.c 	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
block            1066 drivers/mtd/sm_ftl.c 	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
block            1072 drivers/mtd/sm_ftl.c 		ftl->cache_block = block;
block              83 drivers/mtd/sm_ftl.h static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
block              85 drivers/mtd/sm_ftl.h static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
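
The sm_ftl.c excerpts split and compose linear offsets: sm_break_offset() peels the in-block offset, block and zone out of a logical byte offset, while sm_mkoffset() builds a physical MTD offset from zone, block and boffset (the two address different spaces, linked by lba_to_phys_table). A sketch of just the arithmetic, with SM_MAX_ZONE_SIZE and the geometry assumed and do_div() replaced by ordinary division:

/* Sketch only: reproduces the offset arithmetic visible in the sm_ftl.c
 * excerpts.  SM_MAX_ZONE_SIZE and the geometry values are assumptions for
 * illustration; the two helpers model different address spaces, as in the
 * driver, and are not inverses of each other. */
#include <stdio.h>
#include <stdint.h>

#define SM_MAX_ZONE_SIZE 1024	/* assumed physical blocks per zone */

struct geom {
	int block_size;		/* bytes per block */
	int max_lba;		/* logical blocks per zone */
};

/* Physical MTD offset for (zone, physical block, offset-in-block). */
static int64_t mkoffset(const struct geom *g, int zone, int block, int boffset)
{
	return ((int64_t)zone * SM_MAX_ZONE_SIZE + block) * g->block_size + boffset;
}

/* Split a logical byte offset into zone, logical block and in-block offset. */
static void break_offset(const struct geom *g, int64_t offset,
			 int *zone, int *block, int *boffset)
{
	*boffset = offset % g->block_size;
	offset /= g->block_size;
	*block = offset % g->max_lba;
	*zone = offset / g->max_lba;
}

int main(void)
{
	struct geom g = { .block_size = 16384, .max_lba = 1000 };
	int zone, block, boffset;

	break_offset(&g, 987654321LL, &zone, &block, &boffset);
	printf("logical: zone %d block %d boffset %d\n", zone, block, boffset);

	printf("physical offset: %lld\n",
	       (long long)mkoffset(&g, zone, 7, boffset));
	return 0;
}
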
block             162 drivers/mtd/ubi/block.c module_param_cb(block, &ubiblock_param_ops, NULL, 0);
block             163 drivers/mtd/ubi/block.c MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
block              20 drivers/net/dsa/b53/b53_serdes.c static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block,
block              23 drivers/net/dsa/b53/b53_serdes.c 	b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
block              27 drivers/net/dsa/b53/b53_serdes.c static u16 b53_serdes_read_blk(struct b53_device *dev, u8 offset, u16 block)
block              31 drivers/net/dsa/b53/b53_serdes.c 	b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
block              50 drivers/net/dsa/b53/b53_serdes.c 			     u8 offset, u16 block, u16 value)
block              53 drivers/net/dsa/b53/b53_serdes.c 	b53_serdes_write_blk(dev, offset, block, value);
block              57 drivers/net/dsa/b53/b53_serdes.c 			   u8 offset, u16 block)
block              60 drivers/net/dsa/b53/b53_serdes.c 	return b53_serdes_read_blk(dev, offset, block);
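
The b53_serdes.c excerpts show an indirect access pattern: write the block number to B53_SERDES_BLKADDR first, then read or write the register at offset. A self-contained sketch of that "select, then access" pattern with a fake bus standing in for b53_read16()/b53_write16(); the register numbers and sizes are made up.

/* Sketch only: the indirect "select block, then access offset" pattern from
 * the b53_serdes.c excerpts.  The fake bus below stands in for the real
 * b53_read16()/b53_write16(); register numbers are invented. */
#include <stdio.h>
#include <stdint.h>

#define REG_BLKADDR 0x1f	/* pretend block/page selector register */

struct fake_bus {
	uint16_t blkaddr;	/* currently selected block */
	uint16_t regs[8][32];	/* [block][offset] backing store */
};

static void bus_write16(struct fake_bus *b, uint8_t offset, uint16_t value)
{
	if (offset == REG_BLKADDR)
		b->blkaddr = value;
	else
		b->regs[b->blkaddr & 7][offset & 31] = value;
}

static uint16_t bus_read16(struct fake_bus *b, uint8_t offset)
{
	return b->regs[b->blkaddr & 7][offset & 31];
}

/* Mirrors the shape of b53_serdes_write_blk()/read_blk() in the excerpts. */
static void serdes_write_blk(struct fake_bus *b, uint8_t offset,
			     uint16_t block, uint16_t value)
{
	bus_write16(b, REG_BLKADDR, block);	/* select the block first */
	bus_write16(b, offset, value);		/* then hit the register */
}

static uint16_t serdes_read_blk(struct fake_bus *b, uint8_t offset, uint16_t block)
{
	bus_write16(b, REG_BLKADDR, block);
	return bus_read16(b, offset);
}

int main(void)
{
	struct fake_bus bus = { 0 };

	serdes_write_blk(&bus, 0x10, 2, 0xbeef);
	printf("block 2, offset 0x10 = 0x%04x\n", serdes_read_blk(&bus, 0x10, 2));
	return 0;
}
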
block             375 drivers/net/dsa/mv88e6xxx/port.h int mv88e6xxx_port_hidden_write(struct mv88e6xxx_chip *chip, int block,
block             378 drivers/net/dsa/mv88e6xxx/port.h int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
block              18 drivers/net/dsa/mv88e6xxx/port_hidden.c int mv88e6xxx_port_hidden_write(struct mv88e6xxx_chip *chip, int block,
block              31 drivers/net/dsa/mv88e6xxx/port_hidden.c 	       block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT |
block              47 drivers/net/dsa/mv88e6xxx/port_hidden.c int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
block              55 drivers/net/dsa/mv88e6xxx/port_hidden.c 	       block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT |
block             343 drivers/net/dsa/vitesse-vsc73xx-core.c int vsc73xx_is_addr_valid(u8 block, u8 subblock)
block             345 drivers/net/dsa/vitesse-vsc73xx-core.c 	switch (block) {
block             376 drivers/net/dsa/vitesse-vsc73xx-core.c static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
block             379 drivers/net/dsa/vitesse-vsc73xx-core.c 	return vsc->ops->read(vsc, block, subblock, reg, val);
block             382 drivers/net/dsa/vitesse-vsc73xx-core.c static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
block             385 drivers/net/dsa/vitesse-vsc73xx-core.c 	return vsc->ops->write(vsc, block, subblock, reg, val);
block             388 drivers/net/dsa/vitesse-vsc73xx-core.c static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock,
block             395 drivers/net/dsa/vitesse-vsc73xx-core.c 	ret = vsc73xx_read(vsc, block, subblock, reg, &orig);
block             400 drivers/net/dsa/vitesse-vsc73xx-core.c 	return vsc73xx_write(vsc, block, subblock, reg, tmp);
block              42 drivers/net/dsa/vitesse-vsc73xx-platform.c static u32 vsc73xx_make_addr(u8 block, u8 subblock, u8 reg)
block              46 drivers/net/dsa/vitesse-vsc73xx-platform.c 	ret = (block & VSC73XX_CMD_PLATFORM_BLOCK_MASK)
block              55 drivers/net/dsa/vitesse-vsc73xx-platform.c static int vsc73xx_platform_read(struct vsc73xx *vsc, u8 block, u8 subblock,
block              61 drivers/net/dsa/vitesse-vsc73xx-platform.c 	if (!vsc73xx_is_addr_valid(block, subblock))
block              64 drivers/net/dsa/vitesse-vsc73xx-platform.c 	offset = vsc73xx_make_addr(block, subblock, reg);
block              73 drivers/net/dsa/vitesse-vsc73xx-platform.c static int vsc73xx_platform_write(struct vsc73xx *vsc, u8 block, u8 subblock,
block              79 drivers/net/dsa/vitesse-vsc73xx-platform.c 	if (!vsc73xx_is_addr_valid(block, subblock))
block              82 drivers/net/dsa/vitesse-vsc73xx-platform.c 	offset = vsc73xx_make_addr(block, subblock, reg);
block              40 drivers/net/dsa/vitesse-vsc73xx-spi.c static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock)
block              45 drivers/net/dsa/vitesse-vsc73xx-spi.c 	    (block & VSC73XX_CMD_SPI_BLOCK_MASK) << VSC73XX_CMD_SPI_BLOCK_SHIFT;
block              52 drivers/net/dsa/vitesse-vsc73xx-spi.c static int vsc73xx_spi_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
block              62 drivers/net/dsa/vitesse-vsc73xx-spi.c 	if (!vsc73xx_is_addr_valid(block, subblock))
block              77 drivers/net/dsa/vitesse-vsc73xx-spi.c 	cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_SPI_MODE_READ, block, subblock);
block              94 drivers/net/dsa/vitesse-vsc73xx-spi.c static int vsc73xx_spi_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
block             104 drivers/net/dsa/vitesse-vsc73xx-spi.c 	if (!vsc73xx_is_addr_valid(block, subblock))
block             119 drivers/net/dsa/vitesse-vsc73xx-spi.c 	cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_SPI_MODE_WRITE, block, subblock);
block              21 drivers/net/dsa/vitesse-vsc73xx.h 	int (*read)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
block              23 drivers/net/dsa/vitesse-vsc73xx.h 	int (*write)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
block              27 drivers/net/dsa/vitesse-vsc73xx.h int vsc73xx_is_addr_valid(u8 block, u8 subblock);
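
The vsc73xx excerpts build one command/address value by masking and shifting mode, block and subblock fields. A sketch of that packing with assumed mask and shift values (the real VSC73XX_CMD_* constants are not part of the listing):

/* Sketch only: the field-packing style of vsc73xx_make_addr() in the
 * excerpts above.  The mask and shift values are illustrative assumptions. */
#include <stdio.h>
#include <stdint.h>

#define CMD_MODE_MASK      0x1
#define CMD_MODE_SHIFT     4
#define CMD_BLOCK_MASK     0x7
#define CMD_BLOCK_SHIFT    1
#define CMD_SUBBLOCK_MASK  0x1

static uint8_t make_addr(uint8_t mode, uint8_t block, uint8_t subblock)
{
	return (uint8_t)(((mode & CMD_MODE_MASK) << CMD_MODE_SHIFT) |
			 ((block & CMD_BLOCK_MASK) << CMD_BLOCK_SHIFT) |
			 (subblock & CMD_SUBBLOCK_MASK));
}

int main(void)
{
	/* mode = 1 (say, read), block = 3, subblock = 1 -> one command byte */
	printf("cmd[0] = 0x%02x\n", make_addr(1, 3, 1));
	return 0;
}
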
block              43 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h #define BLOCK_OPS_IDX(block, stage, end) \
block              44 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
block             492 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h #define BLOCK_OPS_IDX(block, stage, end) \
block             493 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 			(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
block             540 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h #define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
block             542 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	block##_REG_##block##_PRTY_MASK, \
block             543 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	block##_REG_##block##_PRTY_STS_CLR, \
block             544 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	en_mask, {m1, m1h, m2, m3}, #block \
block             547 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h #define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
block             549 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	block##_REG_##block##_PRTY_MASK_0, \
block             550 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	block##_REG_##block##_PRTY_STS_CLR_0, \
block             551 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	en_mask, {m1, m1h, m2, m3}, #block"_0" \
block             554 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h #define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
block             556 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	block##_REG_##block##_PRTY_MASK_1, \
block             557 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	block##_REG_##block##_PRTY_STS_CLR_1, \
block             558 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 	en_mask, {m1, m1h, m2, m3}, #block"_1" \
block             229 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
block             232 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h 		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
block             235 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h 		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
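
The BLOCK_OPS_IDX() macro above lays out two consecutive slots (start/end) per (block, stage) pair. A tiny worked example, with NUM_OF_INIT_PHASES assumed since its value is defined elsewhere in the bnx2x headers:

/* Sketch only: a worked example of the BLOCK_OPS_IDX() indexing shown above.
 * NUM_OF_INIT_PHASES is an assumed value here for illustration. */
#include <stdio.h>

#define NUM_OF_INIT_PHASES 6	/* assumption, not the real constant */

#define BLOCK_OPS_IDX(block, stage, end) \
	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))

int main(void)
{
	/* Each (block, stage) pair owns two consecutive slots: start and end. */
	printf("start idx = %d\n", BLOCK_OPS_IDX(3, 2, 0));
	printf("end   idx = %d\n", BLOCK_OPS_IDX(3, 2, 1));
	return 0;
}
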
block             335 drivers/net/ethernet/google/gve/gve.h 					       struct gve_notify_block *block)
block             337 drivers/net/ethernet/google/gve/gve.h 	return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
block             435 drivers/net/ethernet/google/gve/gve.h bool gve_tx_poll(struct gve_notify_block *block, int budget);
block             442 drivers/net/ethernet/google/gve/gve.h bool gve_rx_poll(struct gve_notify_block *block, int budget);
block              91 drivers/net/ethernet/google/gve/gve_main.c 	struct gve_notify_block *block = arg;
block              92 drivers/net/ethernet/google/gve/gve_main.c 	struct gve_priv *priv = block->priv;
block              94 drivers/net/ethernet/google/gve/gve_main.c 	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
block              95 drivers/net/ethernet/google/gve/gve_main.c 	napi_schedule_irqoff(&block->napi);
block             101 drivers/net/ethernet/google/gve/gve_main.c 	struct gve_notify_block *block;
block             106 drivers/net/ethernet/google/gve/gve_main.c 	block = container_of(napi, struct gve_notify_block, napi);
block             107 drivers/net/ethernet/google/gve/gve_main.c 	priv = block->priv;
block             109 drivers/net/ethernet/google/gve/gve_main.c 	if (block->tx)
block             110 drivers/net/ethernet/google/gve/gve_main.c 		reschedule |= gve_tx_poll(block, budget);
block             111 drivers/net/ethernet/google/gve/gve_main.c 	if (block->rx)
block             112 drivers/net/ethernet/google/gve/gve_main.c 		reschedule |= gve_rx_poll(block, budget);
block             118 drivers/net/ethernet/google/gve/gve_main.c 	irq_doorbell = gve_irq_doorbell(priv, block);
block             125 drivers/net/ethernet/google/gve/gve_main.c 	if (block->tx)
block             126 drivers/net/ethernet/google/gve/gve_main.c 		reschedule |= gve_tx_poll(block, -1);
block             127 drivers/net/ethernet/google/gve/gve_main.c 	if (block->rx)
block             128 drivers/net/ethernet/google/gve/gve_main.c 		reschedule |= gve_rx_poll(block, -1);
block             200 drivers/net/ethernet/google/gve/gve_main.c 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
block             203 drivers/net/ethernet/google/gve/gve_main.c 		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
block             205 drivers/net/ethernet/google/gve/gve_main.c 		block->priv = priv;
block             207 drivers/net/ethernet/google/gve/gve_main.c 				  gve_intr, 0, block->name, block);
block             219 drivers/net/ethernet/google/gve/gve_main.c 		struct gve_notify_block *block = &priv->ntfy_blocks[j];
block             224 drivers/net/ethernet/google/gve/gve_main.c 		free_irq(priv->msix_vectors[msix_idx].vector, block);
block             246 drivers/net/ethernet/google/gve/gve_main.c 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
block             251 drivers/net/ethernet/google/gve/gve_main.c 		free_irq(priv->msix_vectors[msix_idx].vector, block);
block             316 drivers/net/ethernet/google/gve/gve_main.c 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
block             318 drivers/net/ethernet/google/gve/gve_main.c 	netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
block             324 drivers/net/ethernet/google/gve/gve_main.c 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
block             326 drivers/net/ethernet/google/gve/gve_main.c 	netif_napi_del(&block->napi);
block             805 drivers/net/ethernet/google/gve/gve_main.c 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
block             807 drivers/net/ethernet/google/gve/gve_main.c 		napi_disable(&block->napi);
block             811 drivers/net/ethernet/google/gve/gve_main.c 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
block             813 drivers/net/ethernet/google/gve/gve_main.c 		napi_disable(&block->napi);
block             832 drivers/net/ethernet/google/gve/gve_main.c 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
block             834 drivers/net/ethernet/google/gve/gve_main.c 		napi_enable(&block->napi);
block             835 drivers/net/ethernet/google/gve/gve_main.c 		iowrite32be(0, gve_irq_doorbell(priv, block));
block             839 drivers/net/ethernet/google/gve/gve_main.c 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
block             841 drivers/net/ethernet/google/gve/gve_main.c 		napi_enable(&block->napi);
block             842 drivers/net/ethernet/google/gve/gve_main.c 		iowrite32be(0, gve_irq_doorbell(priv, block));
block              13 drivers/net/ethernet/google/gve/gve_rx.c 	struct gve_notify_block *block =
block              16 drivers/net/ethernet/google/gve/gve_rx.c 	block->rx = NULL;
block              90 drivers/net/ethernet/google/gve/gve_rx.c 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
block              93 drivers/net/ethernet/google/gve/gve_rx.c 	block->rx = rx;
block             425 drivers/net/ethernet/google/gve/gve_rx.c bool gve_rx_poll(struct gve_notify_block *block, int budget)
block             427 drivers/net/ethernet/google/gve/gve_rx.c 	struct gve_rx_ring *rx = block->rx;
block             431 drivers/net/ethernet/google/gve/gve_rx.c 	feat = block->napi.dev->features;
block             136 drivers/net/ethernet/google/gve/gve_tx.c 	struct gve_notify_block *block =
block             139 drivers/net/ethernet/google/gve/gve_tx.c 	block->tx = NULL;
block             178 drivers/net/ethernet/google/gve/gve_tx.c 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
block             181 drivers/net/ethernet/google/gve/gve_tx.c 	block->tx = tx;
block             574 drivers/net/ethernet/google/gve/gve_tx.c bool gve_tx_poll(struct gve_notify_block *block, int budget)
block             576 drivers/net/ethernet/google/gve/gve_tx.c 	struct gve_priv *priv = block->priv;
block             577 drivers/net/ethernet/google/gve/gve_tx.c 	struct gve_tx_ring *tx = block->tx;
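
The gve excerpts hang optional TX and RX rings off a notify block and let gve_napi_poll() service whichever are attached before re-arming the doorbell. A stripped-down user-space model of that dispatch, with NAPI, IRQs and the doorbell replaced by plain flags and the ring types invented for illustration:

/* Sketch only: a user-space model of the per-notify-block dispatch seen in
 * gve_main.c above.  NAPI, interrupts and the doorbell register are replaced
 * by plain booleans; the ring types are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct tx_ring { int pending; };
struct rx_ring { int pending; };

struct notify_block {
	struct tx_ring *tx;	/* NULL when no TX queue uses this block */
	struct rx_ring *rx;	/* NULL when no RX queue uses this block */
};

static bool tx_poll(struct notify_block *block, int budget)
{
	int done = block->tx->pending < budget ? block->tx->pending : budget;

	block->tx->pending -= done;
	return block->tx->pending > 0;	/* true -> more work, reschedule */
}

static bool rx_poll(struct notify_block *block, int budget)
{
	int done = block->rx->pending < budget ? block->rx->pending : budget;

	block->rx->pending -= done;
	return block->rx->pending > 0;
}

static bool block_poll(struct notify_block *block, int budget)
{
	bool reschedule = false;

	if (block->tx)
		reschedule |= tx_poll(block, budget);
	if (block->rx)
		reschedule |= rx_poll(block, budget);
	return reschedule;	/* caller would otherwise re-arm the doorbell */
}

int main(void)
{
	struct tx_ring tx = { .pending = 10 };
	struct rx_ring rx = { .pending = 3 };
	struct notify_block block = { .tx = &tx, .rx = &rx };

	while (block_poll(&block, 4))
		;
	printf("tx pending %d, rx pending %d\n", tx.pending, rx.pending);
	return 0;
}
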
block              29 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				struct rvu_block *block, int lf);
block              31 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				  struct rvu_block *block, int lf);
block              62 drivers/net/ethernet/marvell/octeontx2/af/rvu.c int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
block              68 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	reg = rvu->afreg_base + ((block << 28) | offset);
block             165 drivers/net/ethernet/marvell/octeontx2/af/rvu.c int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
block             171 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	for (lf = 0; lf < block->lf.max; lf++) {
block             172 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (block->fn_map[lf] == pcifunc) {
block             260 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				struct rvu_block *block, u16 pcifunc,
block             267 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (lf >= block->lf.max) {
block             270 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 			__func__, lf, block->name, block->lf.max);
block             283 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->fn_map[lf] = attach ? pcifunc : 0;
block             285 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	switch (block->type) {
block             312 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
block             378 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block;
block             383 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &hw->block[blkaddr];
block             384 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	return block->implemented;
block             390 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block;
block             396 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[blkid];
block             399 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 			block->implemented = true;
block             403 drivers/net/ethernet/marvell/octeontx2/af/rvu.c int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
block             407 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (!block->implemented)
block             410 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
block             411 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
block             418 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block = &rvu->hw->block[blkaddr];
block             420 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (!block->implemented)
block             441 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
block             447 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	for (lf = 0; lf < block->lf.max; lf++) {
block             448 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		cfg = rvu_read64(rvu, block->addr,
block             449 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				 block->lfcfg_reg | (lf << block->lfshift));
block             454 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		__set_bit(lf, block->lf.bmap);
block             458 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_update_rsrc_map(rvu, pfvf, block,
block             462 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_set_msix_offset(rvu, pfvf, block, lf);
block             602 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block;
block             613 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[id];
block             614 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		kfree(block->lf.bmap);
block             643 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block;
block             654 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &hw->block[BLKADDR_NPA];
block             655 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (!block->implemented)
block             658 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lf.max = (cfg >> 16) & 0xFFF;
block             659 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->addr = BLKADDR_NPA;
block             660 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->type = BLKTYPE_NPA;
block             661 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfshift = 8;
block             662 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
block             663 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
block             664 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
block             665 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
block             666 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
block             667 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfreset_reg = NPA_AF_LF_RST;
block             668 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	sprintf(block->name, "NPA");
block             669 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	err = rvu_alloc_bitmap(&block->lf);
block             675 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &hw->block[BLKADDR_NIX0];
block             676 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (!block->implemented)
block             679 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lf.max = cfg & 0xFFF;
block             680 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->addr = BLKADDR_NIX0;
block             681 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->type = BLKTYPE_NIX;
block             682 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfshift = 8;
block             683 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
block             684 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
block             685 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
block             686 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
block             687 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
block             688 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfreset_reg = NIX_AF_LF_RST;
block             689 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	sprintf(block->name, "NIX");
block             690 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	err = rvu_alloc_bitmap(&block->lf);
block             696 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &hw->block[BLKADDR_SSO];
block             697 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (!block->implemented)
block             700 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lf.max = cfg & 0xFFFF;
block             701 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->addr = BLKADDR_SSO;
block             702 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->type = BLKTYPE_SSO;
block             703 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->multislot = true;
block             704 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfshift = 3;
block             705 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
block             706 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
block             707 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
block             708 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
block             709 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
block             710 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
block             711 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	sprintf(block->name, "SSO GROUP");
block             712 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	err = rvu_alloc_bitmap(&block->lf);
block             718 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &hw->block[BLKADDR_SSOW];
block             719 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (!block->implemented)
block             721 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lf.max = (cfg >> 56) & 0xFF;
block             722 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->addr = BLKADDR_SSOW;
block             723 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->type = BLKTYPE_SSOW;
block             724 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->multislot = true;
block             725 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfshift = 3;
block             726 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
block             727 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
block             728 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
block             729 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
block             730 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
block             731 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
block             732 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	sprintf(block->name, "SSOWS");
block             733 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	err = rvu_alloc_bitmap(&block->lf);
block             739 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &hw->block[BLKADDR_TIM];
block             740 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (!block->implemented)
block             743 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lf.max = cfg & 0xFFFF;
block             744 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->addr = BLKADDR_TIM;
block             745 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->type = BLKTYPE_TIM;
block             746 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->multislot = true;
block             747 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfshift = 3;
block             748 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
block             749 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
block             750 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
block             751 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
block             752 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
block             753 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfreset_reg = TIM_AF_LF_RST;
block             754 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	sprintf(block->name, "TIM");
block             755 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	err = rvu_alloc_bitmap(&block->lf);
block             761 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &hw->block[BLKADDR_CPT0];
block             762 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (!block->implemented)
block             765 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lf.max = cfg & 0xFF;
block             766 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->addr = BLKADDR_CPT0;
block             767 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->type = BLKTYPE_CPT;
block             768 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->multislot = true;
block             769 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfshift = 3;
block             770 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
block             771 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
block             772 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
block             773 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
block             774 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
block             775 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block->lfreset_reg = CPT_AF_LF_RST;
block             776 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	sprintf(block->name, "CPT");
block             777 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	err = rvu_alloc_bitmap(&block->lf);
block             800 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[blkid];
block             801 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (!block->lf.bmap)
block             805 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
block             807 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (!block->fn_map)
block             813 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_scan_block(rvu, block);
block             924 drivers/net/ethernet/marvell/octeontx2/af/rvu.c static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
block             930 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	rvu_write64(rvu, block->addr, block->lookup_reg, val);
block             933 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
block             936 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	val = rvu_read64(rvu, block->addr, block->lookup_reg);
block             949 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block;
block             957 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &hw->block[blkaddr];
block             959 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
block             964 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
block             969 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
block             970 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 			    (lf << block->lfshift), 0x00ULL);
block             973 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_update_rsrc_map(rvu, pfvf, block,
block             977 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_free_rsrc(&block->lf, lf);
block             980 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_clear_msix_offset(rvu, pfvf, block, lf);
block             989 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block;
block            1002 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[blkid];
block            1003 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (!block->lf.bmap)
block            1019 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_detach_block(rvu, pcifunc, block->type);
block            1038 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block;
block            1050 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &hw->block[blkaddr];
block            1051 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (!block->lf.bmap)
block            1056 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		lf = rvu_alloc_rsrc(&block->lf);
block            1061 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
block            1062 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 			    (lf << block->lfshift), cfg);
block            1063 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_update_rsrc_map(rvu, pfvf, block,
block            1067 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		rvu_set_msix_offset(rvu, pfvf, block, lf);
block            1076 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block;
block            1081 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[BLKADDR_NPA];
block            1082 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		free_lfs = rvu_rsrc_free_count(&block->lf);
block            1094 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[BLKADDR_NIX0];
block            1095 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		free_lfs = rvu_rsrc_free_count(&block->lf);
block            1106 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[BLKADDR_SSO];
block            1108 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->sso > block->lf.max) {
block            1111 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				 pcifunc, req->sso, block->lf.max);
block            1114 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
block            1115 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		free_lfs = rvu_rsrc_free_count(&block->lf);
block            1123 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[BLKADDR_SSOW];
block            1124 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->ssow > block->lf.max) {
block            1127 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				 pcifunc, req->sso, block->lf.max);
block            1130 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
block            1131 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		free_lfs = rvu_rsrc_free_count(&block->lf);
block            1138 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[BLKADDR_TIM];
block            1139 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->timlfs > block->lf.max) {
block            1142 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				 pcifunc, req->timlfs, block->lf.max);
block            1145 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
block            1146 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		free_lfs = rvu_rsrc_free_count(&block->lf);
block            1153 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		block = &hw->block[BLKADDR_CPT0];
block            1154 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (req->cptlfs > block->lf.max) {
block            1157 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				 pcifunc, req->cptlfs, block->lf.max);
block            1160 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
block            1161 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		free_lfs = rvu_rsrc_free_count(&block->lf);
block            1170 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	dev_info(rvu->dev, "Request for %s failed\n", block->name);
block            1249 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				struct rvu_block *block, int lf)
block            1254 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
block            1255 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 			 (lf << block->lfshift));
block            1265 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	rvu_write64(rvu, block->addr, block->msixcfg_reg |
block            1266 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
block            1270 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
block            1274 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				  struct rvu_block *block, int lf)
block            1279 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
block            1280 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 			 (lf << block->lfshift));
block            1284 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	rvu_write64(rvu, block->addr, block->msixcfg_reg |
block            1285 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		    (lf << block->lfshift), cfg & ~0x7FFULL);
block            1287 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
block            1310 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
block            1313 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
block            1318 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
block            1325 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
block            1332 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
block            1339 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
block            1758 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct rvu_block *block;
block            1762 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	block = &rvu->hw->block[blkaddr];
block            1764 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 					block->type);
block            1768 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		lf = rvu_get_lf(rvu, block, pcifunc, slot);
block            1773 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		if (block->addr == BLKADDR_NIX0)
block            1774 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
block            1775 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		else if (block->addr == BLKADDR_NPA)
block            1778 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		err = rvu_lf_reset(rvu, block, lf);
block            1781 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 				block->addr, lf);
block             208 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 	struct rvu_block block[BLK_COUNT]; /* Block info */
block             268 drivers/net/ethernet/marvell/octeontx2/af/rvu.h static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
block             270 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 	writeq(val, rvu->afreg_base + ((block << 28) | offset));
block             273 drivers/net/ethernet/marvell/octeontx2/af/rvu.h static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
block             275 drivers/net/ethernet/marvell/octeontx2/af/rvu.h 	return readq(rvu->afreg_base + ((block << 28) | offset));
block             315 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
block             316 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
block             318 drivers/net/ethernet/marvell/octeontx2/af/rvu.h int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
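
rvu_read64()/rvu_write64() in the excerpts form the register address as afreg_base + ((block << 28) | offset), and per-LF registers fold the LF index in with lfcfg_reg | (lf << lfshift). A sketch of that arithmetic with made-up base and register values; nothing here touches hardware:

/* Sketch only: the address arithmetic used by rvu_read64()/rvu_write64() and
 * the per-LF register selection in the excerpts above.  Base address and
 * register numbers are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

/* Byte offset of (block, offset) inside the AF register window. */
static uint64_t rvu_reg_addr(uint64_t block, uint64_t offset)
{
	return (block << 28) | offset;
}

/* Per-LF register: the LF index is folded into the register offset. */
static uint64_t lf_reg(uint64_t lfcfg_reg, unsigned int lf, unsigned int lfshift)
{
	return lfcfg_reg | ((uint64_t)lf << lfshift);
}

int main(void)
{
	uint64_t afreg_base = 0x840000000000ull;	/* assumed mapping base */

	printf("block 0x3, reg 0x10: %#llx\n",
	       (unsigned long long)(afreg_base + rvu_reg_addr(0x3, 0x10)));
	printf("LF 5 config reg:     %#llx\n",
	       (unsigned long long)(afreg_base +
				    rvu_reg_addr(0x3, lf_reg(0x10010, 5, 8))));
	return 0;
}
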
block              84 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	struct rvu_block *block;
block              90 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	block = &rvu->hw->block[blkaddr];
block              91 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	return block->lf.max;
block             413 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
block             416 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	struct admin_queue *aq = block->aq;
block             424 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
block             434 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
block             457 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	struct rvu_block *block;
block             468 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	block = &hw->block[blkaddr];
block             469 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	aq = block->aq;
block             476 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
block             595 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
block             740 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	struct rvu_block *block;
block             753 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	block = &hw->block[blkaddr];
block             754 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
block             790 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	err = rvu_lf_reset(rvu, block, nixlf);
block             793 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			block->addr - BLKADDR_NIX0, nixlf);
block             942 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	struct rvu_block *block;
block             951 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	block = &hw->block[blkaddr];
block             952 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
block             959 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	err = rvu_lf_reset(rvu, block, nixlf);
block             962 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			block->addr - BLKADDR_NIX0, nixlf);
block            1255 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
block            1338 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
block            1515 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
block            1541 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
block            1606 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
block            1959 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
block            2167 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
block            2272 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
block            2300 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
block            2493 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
block            2521 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	struct rvu_block *block;
block            2531 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	block = &hw->block[blkaddr];
block            2532 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
block            2659 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
block            2665 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
block            2668 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
block            2671 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
block            2675 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
block            2677 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
block            2683 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	err = rvu_aq_alloc(rvu, &block->aq,
block            2689 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
block            2690 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rvu_write64(rvu, block->addr,
block            2691 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
block            2698 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	struct rvu_block *block;
block            2705 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	block = &hw->block[blkaddr];
block            2729 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	err = nix_aq_init(rvu, block);
block            2798 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	struct rvu_block *block;
block            2808 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	block = &hw->block[blkaddr];
block            2809 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	rvu_aq_free(rvu, block->aq);
block            2838 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
block              18 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
block              21 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	struct admin_queue *aq = block->aq;
block              29 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
block              39 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
block              62 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	struct rvu_block *block;
block              76 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	block = &hw->block[blkaddr];
block              77 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	aq = block->aq;
block              83 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
block             147 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	rc = npa_aq_enqueue_wait(rvu, block, &inst);
block             283 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	struct rvu_block *block;
block             297 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	block = &hw->block[blkaddr];
block             298 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
block             303 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	err = rvu_lf_reset(rvu, block, npalf);
block             380 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	struct rvu_block *block;
block             390 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	block = &hw->block[blkaddr];
block             391 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
block             396 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	err = rvu_lf_reset(rvu, block, npalf);
block             407 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
block             413 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
block             416 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
block             419 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
block             423 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
block             425 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
block             431 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	err = rvu_aq_alloc(rvu, &block->aq,
block             437 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
block             438 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	rvu_write64(rvu, block->addr,
block             439 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
block             453 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	err = npa_aq_init(rvu, &hw->block[blkaddr]);
block             463 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	struct rvu_block *block;
block             470 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	block = &hw->block[blkaddr];
block             471 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	rvu_aq_free(rvu, block->aq);
block             161 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static int verify_block_sig(struct mlx5_cmd_prot_block *block)
block             164 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
block             166 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
block             169 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
block             175 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static void calc_block_sig(struct mlx5_cmd_prot_block *block)
block             177 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
block             180 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
block             181 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
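The mlx5 signature helpers above protect each command block with an XOR-8 checksum: the signature byte is the complement of the XOR over the covered bytes, so the verifier only has to check that the XOR over the whole range comes out as 0xff. A standalone sketch of that scheme; xor8() stands in for the driver's xor8_buf(), and the real struct layout and reserved-field offsets are not reproduced:

#include <stdint.h>
#include <stddef.h>

static uint8_t xor8(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum ^= *buf++;
	return sum;
}

/* Last byte of buf holds the signature, as block->sig does above. */
static void sign_block(uint8_t *buf, size_t len)
{
	buf[len - 1] = ~xor8(buf, len - 1);
}

static int verify_block(const uint8_t *buf, size_t len)
{
	return xor8(buf, len) == 0xff ? 0 : -1;	/* 0xff means intact */
}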
block            1119 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	struct mlx5_cmd_prot_block *block;
block            1139 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		block = next->buf;
block            1140 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		memcpy(block->data, from, copy);
block            1143 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		block->token = token;
block            1152 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	struct mlx5_cmd_prot_block *block;
block            1172 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		block = next->buf;
block            1174 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		memcpy(to, block->data, copy);
block            1216 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	struct mlx5_cmd_prot_block *block;
block            1237 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		block = tmp->buf;
block            1239 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
block            1240 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		block->block_num = cpu_to_be32(n - i - 1);
block            1241 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		block->token = token;
block              94 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 					struct mlx5_hv_vhca_control_block *block)
block             102 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 	block->version = MLX5_HV_VHCA_STATS_VERSION;
block             103 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 	block->rings   = priv->max_nch;
block             105 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 	if (!block->command) {
block             110 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 	sagent->delay = block->command == MLX5_HV_VHCA_STATS_UPDATE_ONCE ? 0 :
block             111 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 			msecs_to_jiffies(block->command * 100);
block             804 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		block_cb = flow_block_cb_lookup(f->block,
block              35 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 			struct mlx5_hv_vhca_control_block *block);
block             115 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 					struct mlx5_hv_vhca_control_block *block)
block             125 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 		if (!(AGENT_MASK(agent->type) & block->control))
block             128 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 		agent->control(agent, block);
block             151 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	struct mlx5_hv_vhca_control_block *block;
block             155 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block             156 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	if (!block)
block             159 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	err = mlx5_hv_read_config(dev, block, sizeof(*block), 0);
block             167 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 		memset(block, 0, sizeof(*block));
block             171 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	if (block->capabilities != capabilities)
block             172 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 		block->capabilities = capabilities;
block             174 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	if (block->control & ~capabilities)
block             177 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	mlx5_hv_vhca_agents_control(hv_vhca, block);
block             178 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	block->command_ack = block->command;
block             181 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	mlx5_hv_write_config(dev, block, sizeof(*block), 0);
block             184 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 	kfree(block);
block             255 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c 					  struct mlx5_hv_vhca_control_block *block),
block              42 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h 					  struct mlx5_hv_vhca_control_block *block),
block              83 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h 					  struct mlx5_hv_vhca_control_block *block),
block             509 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	struct mlx5_cmd_prot_block *block;
block             513 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
block             289 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	void (*destructor)(struct mlxsw_afa_block *block,
block             293 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
block             296 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	list_add(&resource->list, &block->resource_list);
block             304 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
block             308 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
block             309 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		resource->destructor(block, resource);
block             315 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	struct mlxsw_afa_block *block;
block             317 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block             318 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	if (!block)
block             320 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	INIT_LIST_HEAD(&block->resource_list);
block             321 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->afa = mlxsw_afa;
block             324 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->first_set = mlxsw_afa_set_create(true);
block             325 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	if (!block->first_set)
block             332 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		block->cur_set = mlxsw_afa_set_create(false);
block             333 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		if (!block->cur_set)
block             335 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		block->cur_set->prev = block->first_set;
block             336 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		block->first_set->next = block->cur_set;
block             338 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		block->cur_set = block->first_set;
block             341 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	return block;
block             344 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_set_destroy(block->first_set);
block             346 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	kfree(block);
block             351 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
block             353 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	struct mlxsw_afa_set *set = block->first_set;
block             358 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		mlxsw_afa_set_put(block->afa, set);
block             361 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_resources_destroy(block);
block             362 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	kfree(block);
block             366 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
block             368 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	struct mlxsw_afa_set *set = block->cur_set;
block             371 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->cur_set = NULL;
block             372 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->finished = true;
block             381 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		set = mlxsw_afa_set_get(block->afa, set);
block             395 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->first_set = set;
block             400 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
block             402 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	return block->first_set->ht_key.enc_actions;
block             406 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block)
block             408 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	return block->cur_set->ht_key.enc_actions;
block             412 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
block             417 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	if (WARN_ON(!block->first_set->next))
block             419 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	return block->first_set->next->kvdl_index;
block             423 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity)
block             425 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block);
block             427 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv,
block             432 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
block             434 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	if (block->finished)
block             436 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_set_goto_set(block->cur_set,
block             438 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->finished = true;
block             443 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
block             445 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	if (block->finished)
block             447 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_set_goto_set(block->cur_set,
block             449 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->finished = true;
block             454 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
block             456 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	if (block->finished)
block             458 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_set_goto_set(block->cur_set,
block             460 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->finished = true;
block             538 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
block             542 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
block             547 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
block             554 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
block             558 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
block             567 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
block             574 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
block             588 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
block             592 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->afa->ops->counter_index_put(block->afa->ops_priv,
block             598 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
block             604 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_counter_destroy(block, counter);
block             608 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
block             617 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	err = block->afa->ops->counter_index_get(block->afa->ops_priv,
block             622 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_resource_add(block, &counter->resource);
block             633 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
block             639 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	if (block->finished)
block             641 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	if (block->cur_act_index + action_size >
block             642 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	    block->afa->max_acts_per_set) {
block             651 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		set->prev = block->cur_set;
block             652 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		block->cur_act_index = 0;
block             653 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		block->cur_set->next = set;
block             654 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 		block->cur_set = set;
block             657 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	actions = block->cur_set->ht_key.enc_actions;
block             658 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
block             659 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->cur_act_index += action_size;
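The append path listed above hands out fixed-size action slots from the current set's encoded-actions buffer and falls back to chaining a fresh set once the per-set limit is reached. A simplified sketch of the slot arithmetic only; the set chaining and the real MLXSW_AFA_ONE_ACTION_LEN encoding are omitted, and the helper name and parameters here are illustrative:

/* Illustrative only: return the byte position of the next action and
 * advance the per-set index, mirroring the cur_act_index handling in
 * the append routine listed above.
 */
static char *append_action_slot(char *enc_actions, unsigned int *cur_act_index,
				unsigned int action_size,
				unsigned int max_acts_per_set,
				unsigned int one_action_len)
{
	char *oneact;

	if (*cur_act_index + action_size > max_acts_per_set)
		return NULL;	/* caller would chain a new set here */

	oneact = enc_actions + *cur_act_index * one_action_len;
	*cur_act_index += action_size;
	return oneact;
}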
block             730 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
block             734 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	char *act = mlxsw_afa_block_append_action(block,
block             816 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
block             818 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	char *act = mlxsw_afa_block_append_action(block,
block             830 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
block             832 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	char *act = mlxsw_afa_block_append_action(block,
block             845 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
block             848 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	char *act = mlxsw_afa_block_append_action(block,
block             869 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
block             873 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	block->afa->ops->mirror_del(block->afa->ops_priv,
block             881 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
block             887 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_mirror_destroy(block, mirror);
block             891 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
block             901 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	err = block->afa->ops->mirror_add(block->afa->ops_priv,
block             910 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_resource_add(block, &mirror->resource);
block             919 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
block             922 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	char *act = mlxsw_afa_block_append_action(block,
block             934 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
block             941 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
block             947 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
block             956 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_mirror_destroy(block, mirror);
block            1000 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
block            1013 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
block            1020 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
block            1032 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
block            1074 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
block            1077 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
block            1087 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
block            1095 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	counter = mlxsw_afa_counter_create(block);
block            1102 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
block            1112 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	mlxsw_afa_counter_destroy(block, counter);
block            1149 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
block            1152 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	char *act = mlxsw_afa_block_append_action(block,
block            1220 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
block            1224 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c 	char *act = mlxsw_afa_block_append_action(block,
block              36 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
block              37 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
block              38 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
block              39 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block);
block              40 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block);
block              41 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
block              42 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
block              43 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
block              44 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
block              45 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
block              46 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
block              47 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
block              49 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
block              54 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
block              57 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
block              60 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
block              62 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
block              65 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
block              67 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
block              26 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 		const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
block              28 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 		for (j = 0; j < block->instances_count; j++) {
block              31 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 			elinst = &block->instances[j];
block             113 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 		const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
block             115 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 		for (j = 0; j < block->instances_count; j++) {
block             118 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 			elinst = &block->instances[j];
block             295 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
block             300 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 	for (i = 0; i < block->instances_count; i++) {
block             303 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 		elinst = &block->instances[i];
block             316 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 	const struct mlxsw_afk_block *block;
block             322 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 	block = key_info->blocks[block_index];
block             324 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c 	elinst = mlxsw_afk_block_elinst_get(block, element);
block             209 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h 	void (*encode_block)(char *output, int block_index, char *block);
block            1609 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	block_cb = flow_block_cb_lookup(f->block,
block            1662 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	block_cb = flow_block_cb_lookup(f->block,
block            1724 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
block             662 drivers/net/ethernet/mellanox/mlxsw/spectrum.h struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
block             663 drivers/net/ethernet/mellanox/mlxsw/spectrum.h unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
block             664 drivers/net/ethernet/mellanox/mlxsw/spectrum.h void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
block             665 drivers/net/ethernet/mellanox/mlxsw/spectrum.h void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
block             666 drivers/net/ethernet/mellanox/mlxsw/spectrum.h bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
block             669 drivers/net/ethernet/mellanox/mlxsw/spectrum.h void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
block             671 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 			    struct mlxsw_sp_acl_block *block,
block             676 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 			      struct mlxsw_sp_acl_block *block,
block             679 drivers/net/ethernet/mellanox/mlxsw/spectrum.h bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block);
block             682 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 			    struct mlxsw_sp_acl_block *block, u32 chain_index,
block             686 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 			 struct mlxsw_sp_acl_block *block, u32 chain_index,
block             715 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 				  struct mlxsw_sp_acl_block *block,
block             823 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 			    struct mlxsw_sp_acl_block *block,
block             826 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 			     struct mlxsw_sp_acl_block *block,
block             829 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 			  struct mlxsw_sp_acl_block *block,
block             832 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 				 struct mlxsw_sp_acl_block *block,
block             835 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 				   struct mlxsw_sp_acl_block *block,
block              51 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	struct mlxsw_sp_acl_block *block;
block              97 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
block              99 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	return block->mlxsw_sp;
block             102 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
block             104 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	return block ? block->rule_count : 0;
block             107 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
block             109 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	if (block)
block             110 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 		block->disable_count++;
block             113 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
block             115 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	if (block)
block             116 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 		block->disable_count--;
block             119 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
block             121 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	return block->disable_count;
block             124 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block)
block             128 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	list_for_each_entry(binding, &block->binding_list, list) {
block             144 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 			  struct mlxsw_sp_acl_block *block,
block             147 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
block             156 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 			    struct mlxsw_sp_acl_block *block,
block             159 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
block             166 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
block             168 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	return block->ruleset_zero;
block             174 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 				struct mlxsw_sp_acl_block *block)
block             179 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	block->ruleset_zero = ruleset;
block             180 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	list_for_each_entry(binding, &block->binding_list, list) {
block             181 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
block             188 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	list_for_each_entry_continue_reverse(binding, &block->binding_list,
block             190 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
block             191 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	block->ruleset_zero = NULL;
block             199 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 				  struct mlxsw_sp_acl_block *block)
block             203 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	list_for_each_entry(binding, &block->binding_list, list)
block             204 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
block             205 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	block->ruleset_zero = NULL;
block             211 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	struct mlxsw_sp_acl_block *block;
block             213 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block             214 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	if (!block)
block             216 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	INIT_LIST_HEAD(&block->binding_list);
block             217 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	block->mlxsw_sp = mlxsw_sp;
block             218 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	block->net = net;
block             219 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	return block;
block             222 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
block             224 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	WARN_ON(!list_empty(&block->binding_list));
block             225 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	kfree(block);
block             229 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
block             234 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	list_for_each_entry(binding, &block->binding_list, list)
block             242 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 			    struct mlxsw_sp_acl_block *block,
block             250 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
block             253 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	if (!ingress && block->egress_blocker_rule_count) {
block             264 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	if (mlxsw_sp_acl_ruleset_block_bound(block)) {
block             265 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
block             270 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	list_add(&binding->list, &block->binding_list);
block             279 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 			      struct mlxsw_sp_acl_block *block,
block             285 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
block             291 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	if (mlxsw_sp_acl_ruleset_block_bound(block))
block             292 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
block             300 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 			    struct mlxsw_sp_acl_block *block, u32 chain_index,
block             314 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	ruleset->ht_key.block = block;
block             371 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 			      struct mlxsw_sp_acl_block *block, u32 chain_index,
block             377 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	ht_key.block = block;
block             386 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 			    struct mlxsw_sp_acl_block *block, u32 chain_index,
block             396 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
block             404 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 			 struct mlxsw_sp_acl_block *block, u32 chain_index,
block             416 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
block             421 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
block             563 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 				  struct mlxsw_sp_acl_block *block,
block             570 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	if (!list_is_singular(&block->binding_list)) {
block             574 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	binding = list_first_entry(&block->binding_list,
block             683 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
block             701 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
block             709 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	block->rule_count++;
block             710 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
block             726 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
block             728 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
block             729 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 	ruleset->ht_key.block->rule_count--;
block             736 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c 						  ruleset->ht_key.block);
block             102 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c 				       char *block)
block             107 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c 	memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
block             239 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK);
block             296 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c 				       char *block)
block             298 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c 	u64 block_value = mlxsw_sp2_afk_block_value_get(block);
block              18 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 					 struct mlxsw_sp_acl_block *block,
block              63 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
block              82 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
block             115 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 							    block, out_dev,
block             144 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 				      struct mlxsw_sp_acl_block *block)
block             160 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	ingress_dev = __dev_get_by_index(block->net,
block             173 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
block             325 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 				 struct mlxsw_sp_acl_block *block,
block             355 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
block             414 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
block             453 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
block             459 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 			    struct mlxsw_sp_acl_block *block,
block             467 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
block             481 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
block             506 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 			     struct mlxsw_sp_acl_block *block,
block             512 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
block             528 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 			  struct mlxsw_sp_acl_block *block,
block             538 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
block             564 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 				 struct mlxsw_sp_acl_block *block,
block             572 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
block             575 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
block             585 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 				   struct mlxsw_sp_acl_block *block,
block             590 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c 	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
block             594 drivers/net/ethernet/mscc/ocelot_ace.c static void ocelot_ace_rule_add(struct ocelot_acl_block *block,
block             600 drivers/net/ethernet/mscc/ocelot_ace.c 	block->count++;
block             602 drivers/net/ethernet/mscc/ocelot_ace.c 	if (list_empty(&block->rules)) {
block             603 drivers/net/ethernet/mscc/ocelot_ace.c 		list_add(&rule->list, &block->rules);
block             607 drivers/net/ethernet/mscc/ocelot_ace.c 	list_for_each_safe(pos, n, &block->rules) {
block             615 drivers/net/ethernet/mscc/ocelot_ace.c static int ocelot_ace_rule_get_index_id(struct ocelot_acl_block *block,
block             621 drivers/net/ethernet/mscc/ocelot_ace.c 	list_for_each_entry(tmp, &block->rules, list) {
block             630 drivers/net/ethernet/mscc/ocelot_ace.c ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
block             635 drivers/net/ethernet/mscc/ocelot_ace.c 	list_for_each_entry(tmp, &block->rules, list) {
block             666 drivers/net/ethernet/mscc/ocelot_ace.c static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
block             672 drivers/net/ethernet/mscc/ocelot_ace.c 	list_for_each_safe(pos, q, &block->rules) {
block             680 drivers/net/ethernet/mscc/ocelot_ace.c 	block->count--;
block             727 drivers/net/ethernet/mscc/ocelot_ace.c 	struct ocelot_acl_block *block;
block             729 drivers/net/ethernet/mscc/ocelot_ace.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block             730 drivers/net/ethernet/mscc/ocelot_ace.c 	if (!block)
block             733 drivers/net/ethernet/mscc/ocelot_ace.c 	INIT_LIST_HEAD(&block->rules);
block             734 drivers/net/ethernet/mscc/ocelot_ace.c 	block->count = 0;
block             735 drivers/net/ethernet/mscc/ocelot_ace.c 	block->ocelot = ocelot;
block             737 drivers/net/ethernet/mscc/ocelot_ace.c 	return block;
block             740 drivers/net/ethernet/mscc/ocelot_ace.c static void ocelot_acl_block_destroy(struct ocelot_acl_block *block)
block             742 drivers/net/ethernet/mscc/ocelot_ace.c 	kfree(block);
block              12 drivers/net/ethernet/mscc/ocelot_flower.c 	struct ocelot_acl_block *block;
block             172 drivers/net/ethernet/mscc/ocelot_flower.c 					       struct ocelot_port_block *block)
block             180 drivers/net/ethernet/mscc/ocelot_flower.c 	rule->port = block->port;
block             181 drivers/net/ethernet/mscc/ocelot_flower.c 	rule->chip_port = block->port->chip_port;
block             291 drivers/net/ethernet/mscc/ocelot_flower.c static void ocelot_port_block_destroy(struct ocelot_port_block *block)
block             293 drivers/net/ethernet/mscc/ocelot_flower.c 	kfree(block);
block             313 drivers/net/ethernet/mscc/ocelot_flower.c 	block_cb = flow_block_cb_lookup(f->block,
block             347 drivers/net/ethernet/mscc/ocelot_flower.c 	block_cb = flow_block_cb_lookup(f->block,
block             172 drivers/net/ethernet/mscc/ocelot_tc.c 		block_cb = flow_block_cb_lookup(f->block, cb, port);
block            1987 drivers/net/ethernet/neterion/vxge/vxge-config.c __vxge_hw_ring_block_memblock_idx(u8 *block)
block            1989 drivers/net/ethernet/neterion/vxge/vxge-config.c 	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
block            1997 drivers/net/ethernet/neterion/vxge/vxge-config.c __vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
block            1999 drivers/net/ethernet/neterion/vxge/vxge-config.c 	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
block            2008 drivers/net/ethernet/neterion/vxge/vxge-config.c __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
block            2010 drivers/net/ethernet/neterion/vxge/vxge-config.c 	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
block            1501 drivers/net/ethernet/netronome/nfp/flower/offload.c 		block_cb = flow_block_cb_lookup(f->block,
block            1623 drivers/net/ethernet/netronome/nfp/flower/offload.c 		block_cb = flow_block_cb_lookup(f->block,
block            2170 drivers/net/ethernet/qlogic/qed/qed_debug.c 		struct block_defs *block = s_block_defs[i];
block            2172 drivers/net/ethernet/qlogic/qed/qed_debug.c 		dev_data->block_in_reset[i] = block->has_reset_bit &&
block            2173 drivers/net/ethernet/qlogic/qed/qed_debug.c 		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
block            2265 drivers/net/ethernet/qlogic/qed/qed_debug.c 	struct block_defs *block = s_block_defs[block_id];
block            2269 drivers/net/ethernet/qlogic/qed/qed_debug.c 	if (block->associated_to_storm &&
block            2271 drivers/net/ethernet/qlogic/qed/qed_debug.c 				       (enum dbg_storms)block->storm_id))
block            2356 drivers/net/ethernet/qlogic/qed/qed_debug.c 		struct block_defs *block = s_block_defs[block_id];
block            2358 drivers/net/ethernet/qlogic/qed/qed_debug.c 		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
block            2359 drivers/net/ethernet/qlogic/qed/qed_debug.c 		    block->unreset)
block            2360 drivers/net/ethernet/qlogic/qed/qed_debug.c 			reg_val[block->reset_reg] |=
block            2361 drivers/net/ethernet/qlogic/qed/qed_debug.c 			    BIT(block->reset_bit_offset);
block            3874 drivers/net/ethernet/qlogic/qed/qed_debug.c 	struct block_defs *block = s_block_defs[block_id];
block            3876 drivers/net/ethernet/qlogic/qed/qed_debug.c 	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
block            3877 drivers/net/ethernet/qlogic/qed/qed_debug.c 	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
block            3878 drivers/net/ethernet/qlogic/qed/qed_debug.c 	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
block            3879 drivers/net/ethernet/qlogic/qed/qed_debug.c 	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
block            3880 drivers/net/ethernet/qlogic/qed/qed_debug.c 	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
block            3898 drivers/net/ethernet/qlogic/qed/qed_debug.c 			struct block_defs *block = s_block_defs[block_id];
block            3900 drivers/net/ethernet/qlogic/qed/qed_debug.c 			if (block->dbg_client_id[dev_data->chip_id] !=
block            3902 drivers/net/ethernet/qlogic/qed/qed_debug.c 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
block            3917 drivers/net/ethernet/qlogic/qed/qed_debug.c 		struct block_defs *block = s_block_defs[block_id];
block            3922 drivers/net/ethernet/qlogic/qed/qed_debug.c 		if (block->dbg_client_id[dev_data->chip_id] ==
block            3935 drivers/net/ethernet/qlogic/qed/qed_debug.c 					       block->name,
block            3954 drivers/net/ethernet/qlogic/qed/qed_debug.c 		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
block            3982 drivers/net/ethernet/qlogic/qed/qed_debug.c 		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
block            3337 drivers/net/ethernet/qlogic/qed/qed_hsi.h 				  enum block_id block,
block             607 drivers/net/usb/usbnet.c 		goto block;
block             622 drivers/net/usb/usbnet.c block:
block            1324 drivers/net/wireless/ath/ath5k/ath5k.h 	spinlock_t		block;		/* protects beacon */
block            2150 drivers/net/wireless/ath/ath5k/base.c 	spin_lock_bh(&ah->block);
block            2176 drivers/net/wireless/ath/ath5k/base.c 	spin_unlock_bh(&ah->block);
block            2202 drivers/net/wireless/ath/ath5k/base.c 		spin_lock(&ah->block);
block            2204 drivers/net/wireless/ath/ath5k/base.c 		spin_unlock(&ah->block);
block            2578 drivers/net/wireless/ath/ath5k/base.c 	spin_lock_init(&ah->block);
block             302 drivers/net/wireless/ath/ath5k/mac80211-ops.c 		spin_lock_bh(&ah->block);
block             304 drivers/net/wireless/ath/ath5k/mac80211-ops.c 		spin_unlock_bh(&ah->block);
block            3146 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 				    u8 *block,
block            3158 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 		offset = block[it];
block            3161 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 		length = block[it+1];
block            3168 drivers/net/wireless/ath/ath9k/ar9003_eeprom.c 			memcpy(&mptr[spot], &block[it+2], length);
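The ar9003_eeprom.c entries walk a compressed EEPROM block as a stream of (offset, length, payload) records and memcpy() each payload into the in-memory image. Below is a minimal user-space sketch of such a record walk under assumed conventions (zero/zero end marker, absolute offsets); the real driver's position bookkeeping differs in detail.

/* Minimal user-space sketch of an (offset, length, payload) record walk;
 * bounds checks and the end-marker convention are assumptions made for
 * illustration, not taken from the driver. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int restore_blocks(uint8_t *mptr, size_t msize,
			  const uint8_t *block, size_t blen)
{
	size_t it = 0;

	while (it + 2 <= blen) {
		uint8_t offset = block[it];
		uint8_t length = block[it + 1];

		if (offset == 0 && length == 0)		/* assumed end marker */
			break;
		if (it + 2 + length > blen || offset + (size_t)length > msize)
			return -1;			/* malformed record */
		memcpy(&mptr[offset], &block[it + 2], length);
		it += 2 + length;
	}
	return 0;
}

int main(void)
{
	uint8_t image[16] = { 0 };
	const uint8_t recs[] = { 4, 3, 0xaa, 0xbb, 0xcc, 0, 0 };

	if (restore_blocks(image, sizeof(image), recs, sizeof(recs)) == 0)
		printf("image[4..6] = %02x %02x %02x\n",
		       image[4], image[5], image[6]);
	return 0;
}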
block             374 drivers/net/wireless/ath/wil6210/fw_inc.c 	const struct wil_fw_data_dwrite *block = d->data;
block             377 drivers/net/wireless/ath/wil6210/fw_inc.c 	if (size % sizeof(*block)) {
block             379 drivers/net/wireless/ath/wil6210/fw_inc.c 			   sizeof(*block), size);
block             382 drivers/net/wireless/ath/wil6210/fw_inc.c 	n = size / sizeof(*block);
block             386 drivers/net/wireless/ath/wil6210/fw_inc.c 		u32 m = le32_to_cpu(block[i].mask);
block             387 drivers/net/wireless/ath/wil6210/fw_inc.c 		u32 v = le32_to_cpu(block[i].value);
block             390 drivers/net/wireless/ath/wil6210/fw_inc.c 		if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
block             397 drivers/net/wireless/ath/wil6210/fw_inc.c 			   le32_to_cpu(block[i].addr), y, x, v, m);
block             431 drivers/net/wireless/ath/wil6210/fw_inc.c 	const struct wil_fw_data_gw *block = d->data;
block             439 drivers/net/wireless/ath/wil6210/fw_inc.c 	if (size < sizeof(*d) + sizeof(*block)) {
block             444 drivers/net/wireless/ath/wil6210/fw_inc.c 	if ((size - sizeof(*d)) % sizeof(*block)) {
block             447 drivers/net/wireless/ath/wil6210/fw_inc.c 			   sizeof(*block), size - sizeof(*d));
block             450 drivers/net/wireless/ath/wil6210/fw_inc.c 	n = (size - sizeof(*d)) / sizeof(*block);
block             476 drivers/net/wireless/ath/wil6210/fw_inc.c 		u32 a = le32_to_cpu(block[i].addr);
block             477 drivers/net/wireless/ath/wil6210/fw_inc.c 		u32 v = le32_to_cpu(block[i].value);
block             495 drivers/net/wireless/ath/wil6210/fw_inc.c 	const struct wil_fw_data_gw4 *block = d->data;
block             497 drivers/net/wireless/ath/wil6210/fw_inc.c 	void __iomem *gwa_val[ARRAY_SIZE(block->value)];
block             503 drivers/net/wireless/ath/wil6210/fw_inc.c 	if (size < sizeof(*d) + sizeof(*block)) {
block             508 drivers/net/wireless/ath/wil6210/fw_inc.c 	if ((size - sizeof(*d)) % sizeof(*block)) {
block             511 drivers/net/wireless/ath/wil6210/fw_inc.c 			   sizeof(*block), size - sizeof(*d));
block             514 drivers/net/wireless/ath/wil6210/fw_inc.c 	n = (size - sizeof(*d)) / sizeof(*block);
block             524 drivers/net/wireless/ath/wil6210/fw_inc.c 	for (k = 0; k < ARRAY_SIZE(block->value); k++)
block             545 drivers/net/wireless/ath/wil6210/fw_inc.c 		u32 a = le32_to_cpu(block[i].addr);
block             546 drivers/net/wireless/ath/wil6210/fw_inc.c 		u32 v[ARRAY_SIZE(block->value)];
block             548 drivers/net/wireless/ath/wil6210/fw_inc.c 		for (k = 0; k < ARRAY_SIZE(block->value); k++)
block             549 drivers/net/wireless/ath/wil6210/fw_inc.c 			v[k] = le32_to_cpu(block[i].value[k]);
block             555 drivers/net/wireless/ath/wil6210/fw_inc.c 		for (k = 0; k < ARRAY_SIZE(block->value); k++)
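The wil6210 fw_inc.c entries validate that a section's payload is a whole number of fixed-size little-endian records before looping over them with le32_to_cpu(). A minimal user-space sketch of that validate-then-iterate pattern follows; the record layout and helper names are illustrative, and get_le32() stands in for le32_to_cpu().

/* Minimal user-space sketch: a payload must be a whole number of
 * fixed-size little-endian (addr, value, mask) records. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct fw_data_write {		/* one record, stored little-endian */
	uint32_t addr;
	uint32_t value;
	uint32_t mask;
};

static uint32_t get_le32(const void *p)
{
	const uint8_t *b = p;

	return b[0] | b[1] << 8 | b[2] << 16 | (uint32_t)b[3] << 24;
}

static int apply_dwrite(const void *data, size_t size)
{
	const struct fw_data_write *block = data;
	size_t i, n;

	if (size % sizeof(*block)) {
		fprintf(stderr, "bad size %zu (record is %zu bytes)\n",
			size, sizeof(*block));
		return -1;
	}
	n = size / sizeof(*block);
	for (i = 0; i < n; i++) {
		uint32_t a = get_le32(&block[i].addr);
		uint32_t v = get_le32(&block[i].value);
		uint32_t m = get_le32(&block[i].mask);

		/* masked read-modify-write semantics, printed instead of done */
		printf("write [0x%08x] = (old & 0x%08x) | 0x%08x\n",
		       a, (uint32_t)~m, v & m);
	}
	return 0;
}

int main(void)
{
	const uint8_t blob[] = {
		0x00, 0x10, 0x00, 0x00,		/* addr  = 0x00001000 */
		0x01, 0x00, 0x00, 0x00,		/* value = 0x00000001 */
		0xff, 0x00, 0x00, 0x00,		/* mask  = 0x000000ff */
	};

	return apply_dwrite(blob, sizeof(blob)) ? 1 : 0;
}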
block             320 drivers/net/wireless/atmel/at76c50x-usb.c 				  void *block, int size)
block             324 drivers/net/wireless/atmel/at76c50x-usb.c 			       USB_RECIP_INTERFACE, blockno, 0, block, size,
block             371 drivers/net/wireless/atmel/at76c50x-usb.c 	u8 *block = NULL;
block             387 drivers/net/wireless/atmel/at76c50x-usb.c 	block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL);
block             388 drivers/net/wireless/atmel/at76c50x-usb.c 	if (!block) {
block             440 drivers/net/wireless/atmel/at76c50x-usb.c 			memcpy(block, buf, bsize);
block             445 drivers/net/wireless/atmel/at76c50x-usb.c 			    at76_load_int_fw_block(udev, blockno, block, bsize);
block             505 drivers/net/wireless/atmel/at76c50x-usb.c 	kfree(block);
block             577 drivers/net/wireless/atmel/at76c50x-usb.c 					 void *block, int size)
block             581 drivers/net/wireless/atmel/at76c50x-usb.c 			       0x0802, blockno, block, size,
block            1260 drivers/net/wireless/atmel/at76c50x-usb.c 	u8 *block;
block            1275 drivers/net/wireless/atmel/at76c50x-usb.c 	block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL);
block            1276 drivers/net/wireless/atmel/at76c50x-usb.c 	if (!block)
block            1284 drivers/net/wireless/atmel/at76c50x-usb.c 		memcpy(block, buf, bsize);
block            1288 drivers/net/wireless/atmel/at76c50x-usb.c 		ret = at76_load_ext_fw_block(udev, blockno, block, bsize);
block            1307 drivers/net/wireless/atmel/at76c50x-usb.c 	kfree(block);
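The at76c50x-usb entries allocate one FW_BLOCK_SIZE scratch buffer, copy the firmware image into it chunk by chunk, and hand each numbered chunk to a block-transfer routine before freeing the buffer. A minimal user-space sketch of that chunking loop is shown below; the FW_BLOCK_SIZE value and the callback are illustrative stand-ins for the USB transfer.

/* Minimal user-space sketch of a chunked firmware download loop. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FW_BLOCK_SIZE 1024	/* illustrative chunk size */

typedef int (*load_block_fn)(int blockno, const void *block, int size);

static int download_fw(const unsigned char *buf, size_t len, load_block_fn load)
{
	unsigned char *block;
	int blockno = 0, ret = 0;

	block = malloc(FW_BLOCK_SIZE);
	if (!block)
		return -1;

	while (len > 0) {
		size_t bsize = len < FW_BLOCK_SIZE ? len : FW_BLOCK_SIZE;

		memcpy(block, buf, bsize);
		ret = load(blockno, block, (int)bsize);
		if (ret < 0)
			break;
		buf += bsize;
		len -= bsize;
		blockno++;
	}
	free(block);
	return ret;
}

static int print_block(int blockno, const void *block, int size)
{
	(void)block;
	printf("block %d: %d bytes\n", blockno, size);
	return 0;
}

int main(void)
{
	unsigned char image[2500] = { 0 };

	return download_fw(image, sizeof(image), print_block) < 0;
}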
block            1150 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	memcpy(range->data, fwrt->trans->init_dram.paging[idx].block,
block            1171 drivers/net/wireless/intel/iwlwifi/fw/dbg.c 	memcpy(range->data, fwrt->trans->dbg.fw_mon[idx].block,
block             100 drivers/net/wireless/intel/iwlwifi/fw/paging.c 	struct page *block;
block             129 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		block = alloc_pages(GFP_KERNEL, order);
block             130 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		if (!block) {
block             136 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		fwrt->fw_paging_db[blk_idx].fw_paging_block = block;
block             139 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		phys = dma_map_page(fwrt->trans->dev, block, 0,
block             229 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
block             231 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		int len = block->fw_paging_size;
block             247 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		} else if (block->fw_paging_size > remaining) {
block             255 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		memcpy(page_address(block->fw_paging_block),
block             258 drivers/net/wireless/intel/iwlwifi/fw/paging.c 					   block->fw_paging_phys,
block             259 drivers/net/wireless/intel/iwlwifi/fw/paging.c 					   block->fw_paging_size,
block             266 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		offset += block->fw_paging_size;
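The iwlwifi paging entries allocate fixed-size paging blocks and then copy the firmware paging section across them, copying only the remaining bytes into the last block. A minimal user-space sketch of that split-across-blocks copy follows; block sizes and structure names are illustrative.

/* Minimal user-space sketch: split an image across pre-allocated
 * fixed-size blocks, the last block taking only the remainder. */
#include <stdio.h>
#include <string.h>

struct paging_block {
	void   *mem;
	size_t  size;
};

static int fill_paging(struct paging_block *db, size_t nblocks,
		       const unsigned char *image, size_t len)
{
	size_t idx, offset = 0;

	for (idx = 0; idx < nblocks && offset < len; idx++) {
		size_t remaining = len - offset;
		size_t copy = db[idx].size < remaining ? db[idx].size : remaining;

		memcpy(db[idx].mem, image + offset, copy);
		offset += copy;
	}
	return offset == len ? 0 : -1;	/* -1: image larger than the blocks */
}

int main(void)
{
	unsigned char b0[256], b1[256];
	struct paging_block db[] = { { b0, sizeof(b0) }, { b1, sizeof(b1) } };
	unsigned char image[300];

	memset(image, 0xab, sizeof(image));
	printf("fill_paging: %d\n", fill_paging(db, 2, image, sizeof(image)));
	return 0;
}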
block             586 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
block             689 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	void *block;
block            1099 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 					    bool block)
block            1107 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 		trans->ops->block_txq_ptrs(trans, block);
block              73 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c 				  dram->paging[i].block,
block             373 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	int block;
block             778 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	dram->block = dma_alloc_coherent(trans->dev, sec->len,
block             781 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	if (!dram->block)
block             785 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	memcpy(dram->block, sec->data, sec->len);
block             802 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 				  dram->fw[i].block, dram->fw[i].physical);
block             198 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 				  trans->dbg.fw_mon[i].block,
block             200 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		trans->dbg.fw_mon[i].block = NULL;
block             237 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr;
block            2268 drivers/net/wireless/intel/iwlwifi/pcie/trans.c static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
block            2281 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		if (!block && !(WARN_ON_ONCE(!txq->block))) {
block            2282 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			txq->block--;
block            2283 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			if (!txq->block) {
block            2287 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		} else if (block) {
block            2288 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			txq->block++;
block            2835 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf;
block            3137 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			       trans->dbg.fw_mon[0].block,
block            3356 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			       trans->init_dram.paging[i].block, page_len);
block             320 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	if (!txq->block)
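The iwlwifi pcie entries implement block_txq_ptrs() as a nesting counter: blocking increments txq->block, unblocking decrements it, and the queue is restarted only when the counter drops back to zero. A small user-space sketch of that counter discipline follows; the printf stands in for the actual queue restart.

/* Minimal user-space sketch of a nesting block/unblock counter. */
#include <stdio.h>

struct txq {
	int block;		/* >0: queue writes are held back */
};

static void txq_set_blocked(struct txq *q, int block)
{
	if (block) {
		q->block++;
	} else if (q->block > 0) {
		q->block--;
		if (!q->block)
			printf("queue unblocked, restart writes\n");
	}
}

int main(void)
{
	struct txq q = { 0 };

	txq_set_blocked(&q, 1);		/* block */
	txq_set_blocked(&q, 1);		/* nested block */
	txq_set_blocked(&q, 0);		/* still blocked */
	txq_set_blocked(&q, 0);		/* counter reaches zero: restart */
	return 0;
}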
block             231 drivers/net/wireless/marvell/libertas/cmd.c int lbs_set_ps_mode(struct lbs_private *priv, u16 cmd_action, bool block)
block             254 drivers/net/wireless/marvell/libertas/cmd.c 	if (block)
block             140 drivers/net/wireless/marvell/libertas/cmd.h int lbs_set_ps_mode(struct lbs_private *priv, u16 cmd_action, bool block);
block              53 drivers/net/wireless/st/cw1200/fwio.c 	int ret, block, num_blocks;
block             180 drivers/net/wireless/st/cw1200/fwio.c 	for (block = 0; block < num_blocks; block++) {
block             397 drivers/nvdimm/btt.c 	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));
block             511 drivers/nvdimm/btt.c 		u32 lba = arena->freelist[lane].block;
block             557 drivers/nvdimm/btt.c 		arena->freelist[i].block = log_oldmap;
block            1337 drivers/nvdimm/btt.c 		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
block            1352 drivers/nvdimm/btt.c 		new_postmap = arena->freelist[lane].block;
block             119 drivers/nvdimm/btt.h 	u32 block;
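The nvdimm/btt entries keep one free physical block per lane and swap it with the block currently mapped to an LBA on every write (the freelist entry is seeded from the log's old_map). The sketch below models that indirection in user space only to show the swap; it is a simplified illustration, not the BTT on-disk protocol.

/* Minimal user-space sketch of per-lane free-block indirection. */
#include <stdint.h>
#include <stdio.h>

#define NBLOCKS 8

static uint32_t map[NBLOCKS];		/* LBA -> physical block */
static uint32_t freelist_block[2];	/* one spare block per lane */

static void write_lba(unsigned int lane, uint32_t lba)
{
	uint32_t new_postmap = freelist_block[lane];	/* write lands here */
	uint32_t old_postmap = map[lba];

	map[lba] = new_postmap;				/* flip the mapping */
	freelist_block[lane] = old_postmap;		/* old block is now free */
	printf("lane %u: lba %u now at block %u (block %u freed)\n",
	       lane, lba, new_postmap, old_postmap);
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < NBLOCKS; i++)
		map[i] = i;
	freelist_block[0] = NBLOCKS;		/* spare blocks past the map */
	freelist_block[1] = NBLOCKS + 1;

	write_lba(0, 3);
	write_lba(1, 3);
	return 0;
}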
block             318 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(csw-read-block,			0x06),
block             126 drivers/platform/chrome/cros_ec_chardev.c 						    bool fetch, bool block)
block             132 drivers/platform/chrome/cros_ec_chardev.c 	if (!block && list_empty(&priv->events)) {
block              68 drivers/platform/chrome/cros_ec_vbc.c 	const size_t data_sz = sizeof(params->block);
block              80 drivers/platform/chrome/cros_ec_vbc.c 	memcpy(params->block, buf, data_sz);
block             521 drivers/platform/x86/dell-laptop.c 		int block = rfkill_blocked(rfkill);
block             523 drivers/platform/x86/dell-laptop.c 				   1 | (radio << 8) | (block << 16), 0, 0, 0);
block             115 drivers/platform/x86/wmi.c 	struct guid_block *block;
block             121 drivers/platform/x86/wmi.c 		block = &wblock->gblock;
block             123 drivers/platform/x86/wmi.c 		if (memcmp(block->guid, &guid_input, 16) == 0) {
block             180 drivers/platform/x86/wmi.c 	struct guid_block *block = NULL;
block             185 drivers/platform/x86/wmi.c 	block = &wblock->gblock;
block             188 drivers/platform/x86/wmi.c 	snprintf(method, 5, "WE%02X", block->notify_id);
block             254 drivers/platform/x86/wmi.c 	struct guid_block *block = NULL;
block             263 drivers/platform/x86/wmi.c 	block = &wblock->gblock;
block             266 drivers/platform/x86/wmi.c 	if (!(block->flags & ACPI_WMI_METHOD))
block             269 drivers/platform/x86/wmi.c 	if (block->instance_count <= instance)
block             282 drivers/platform/x86/wmi.c 		if (block->flags & ACPI_WMI_STRING) {
block             291 drivers/platform/x86/wmi.c 	strncat(method, block->object_id, 2);
block             302 drivers/platform/x86/wmi.c 	struct guid_block *block = NULL;
block             313 drivers/platform/x86/wmi.c 	block = &wblock->gblock;
block             316 drivers/platform/x86/wmi.c 	if (block->instance_count <= instance)
block             320 drivers/platform/x86/wmi.c 	if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
block             335 drivers/platform/x86/wmi.c 	if (block->flags & ACPI_WMI_EXPENSIVE) {
block             336 drivers/platform/x86/wmi.c 		strncat(wc_method, block->object_id, 2);
block             347 drivers/platform/x86/wmi.c 	strncat(method, block->object_id, 2);
block             355 drivers/platform/x86/wmi.c 	if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
block             408 drivers/platform/x86/wmi.c 	struct guid_block *block = NULL;
block             421 drivers/platform/x86/wmi.c 	block = &wblock->gblock;
block             424 drivers/platform/x86/wmi.c 	if (block->instance_count <= instance)
block             428 drivers/platform/x86/wmi.c 	if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
block             436 drivers/platform/x86/wmi.c 	if (block->flags & ACPI_WMI_STRING) {
block             444 drivers/platform/x86/wmi.c 	strncat(method, block->object_id, 2);
block             520 drivers/platform/x86/wmi.c 	struct wmi_block *block;
block             530 drivers/platform/x86/wmi.c 	list_for_each_entry(block, &wmi_block_list, list) {
block             533 drivers/platform/x86/wmi.c 		if (memcmp(block->gblock.guid, &guid_input, 16) == 0) {
block             534 drivers/platform/x86/wmi.c 			if (block->handler &&
block             535 drivers/platform/x86/wmi.c 			    block->handler != wmi_notify_debug)
block             538 drivers/platform/x86/wmi.c 			block->handler = handler;
block             539 drivers/platform/x86/wmi.c 			block->handler_data = data;
block             541 drivers/platform/x86/wmi.c 			wmi_status = wmi_method_enable(block, 1);
block             559 drivers/platform/x86/wmi.c 	struct wmi_block *block;
block             569 drivers/platform/x86/wmi.c 	list_for_each_entry(block, &wmi_block_list, list) {
block             572 drivers/platform/x86/wmi.c 		if (memcmp(block->gblock.guid, &guid_input, 16) == 0) {
block             573 drivers/platform/x86/wmi.c 			if (!block->handler ||
block             574 drivers/platform/x86/wmi.c 			    block->handler == wmi_notify_debug)
block             578 drivers/platform/x86/wmi.c 				block->handler = wmi_notify_debug;
block             581 drivers/platform/x86/wmi.c 				wmi_status = wmi_method_enable(block, 0);
block             582 drivers/platform/x86/wmi.c 				block->handler = NULL;
block             583 drivers/platform/x86/wmi.c 				block->handler_data = NULL;
block            1276 drivers/platform/x86/wmi.c 	struct guid_block *block;
block            1281 drivers/platform/x86/wmi.c 		block = &wblock->gblock;
block            1284 drivers/platform/x86/wmi.c 		    (block->flags & ACPI_WMI_EVENT) &&
block            1285 drivers/platform/x86/wmi.c 		    (block->notify_id == event))
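The platform/x86/wmi.c entries repeatedly walk the wmi_block_list and match a block by memcmp() of its 16-byte GUID before checking flags or installing a notification handler. A minimal user-space sketch of that lookup-then-install pattern follows; the list and type names are illustrative stand-ins for the driver's structures.

/* Minimal user-space sketch: find a block by 16-byte GUID, then claim
 * its handler slot if it is still free. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wmi_entry {
	uint8_t guid[16];
	void (*handler)(void *data);
	void *handler_data;
	struct wmi_entry *next;
};

static struct wmi_entry *find_guid(struct wmi_entry *head, const uint8_t *guid)
{
	struct wmi_entry *e;

	for (e = head; e; e = e->next)
		if (memcmp(e->guid, guid, 16) == 0)
			return e;
	return NULL;
}

static int install_handler(struct wmi_entry *head, const uint8_t *guid,
			   void (*handler)(void *), void *data)
{
	struct wmi_entry *e = find_guid(head, guid);

	if (!e)
		return -1;		/* no such GUID */
	if (e->handler)
		return -2;		/* already claimed */
	e->handler = handler;
	e->handler_data = data;
	return 0;
}

static void dummy_handler(void *data) { (void)data; }

int main(void)
{
	static const uint8_t guid[16] = { 0xde, 0xad };
	struct wmi_entry e = { { 0xde, 0xad }, NULL, NULL, NULL };

	if (install_handler(&e, guid, dummy_handler, NULL) == 0)
		printf("handler installed\n");
	return 0;
}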
block             840 drivers/power/supply/bq27xxx_battery.c 	u8 block;
block             847 drivers/power/supply/bq27xxx_battery.c 	.block = (di)->dm_regs[i].offset / BQ27XXX_DM_SZ, \
block             854 drivers/power/supply/bq27xxx_battery.c 	    buf->block == reg->offset / BQ27XXX_DM_SZ)
block            1045 drivers/power/supply/bq27xxx_battery.c 	ret = bq27xxx_write(di, BQ27XXX_DM_BLOCK, buf->block, true);
block            1192 drivers/power/supply/bq27xxx_battery.c 	ret = bq27xxx_write(di, BQ27XXX_DM_BLOCK, buf->block, true);
block            1256 drivers/power/supply/bq27xxx_battery.c 		bool same = bd.class == bt.class && bd.block == bt.block;
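The bq27xxx_battery.c entries address data memory by block, computing the block index as offset / BQ27XXX_DM_SZ. The sketch below shows that arithmetic with an assumed 32-byte block size; the constant and the sample offset are illustrative.

/* Minimal sketch of block/offset arithmetic for block-addressed data memory. */
#include <stdio.h>

#define DM_SZ 32	/* assumed data-memory block size */

int main(void)
{
	unsigned int offset = 73;		/* example absolute offset */
	unsigned int block  = offset / DM_SZ;	/* which block to select */
	unsigned int in_blk = offset % DM_SZ;	/* position within the block */

	printf("offset %u -> block %u, byte %u\n", offset, block, in_blk);
	return 0;
}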
block             986 drivers/rapidio/devices/rio_mport_cdev.c 				    (void __user *)(uintptr_t)transaction.block,
block             998 drivers/rapidio/devices/rio_mport_cdev.c 	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
block             168 drivers/s390/block/dasd.c 	struct dasd_block *block;
block             170 drivers/s390/block/dasd.c 	block = kzalloc(sizeof(*block), GFP_ATOMIC);
block             171 drivers/s390/block/dasd.c 	if (!block)
block             174 drivers/s390/block/dasd.c 	atomic_set(&block->open_count, -1);
block             176 drivers/s390/block/dasd.c 	atomic_set(&block->tasklet_scheduled, 0);
block             177 drivers/s390/block/dasd.c 	tasklet_init(&block->tasklet, dasd_block_tasklet,
block             178 drivers/s390/block/dasd.c 		     (unsigned long) block);
block             179 drivers/s390/block/dasd.c 	INIT_LIST_HEAD(&block->ccw_queue);
block             180 drivers/s390/block/dasd.c 	spin_lock_init(&block->queue_lock);
block             181 drivers/s390/block/dasd.c 	INIT_LIST_HEAD(&block->format_list);
block             182 drivers/s390/block/dasd.c 	spin_lock_init(&block->format_lock);
block             183 drivers/s390/block/dasd.c 	timer_setup(&block->timer, dasd_block_timeout, 0);
block             184 drivers/s390/block/dasd.c 	spin_lock_init(&block->profile.lock);
block             186 drivers/s390/block/dasd.c 	return block;
block             193 drivers/s390/block/dasd.c void dasd_free_block(struct dasd_block *block)
block             195 drivers/s390/block/dasd.c 	kfree(block);
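The dasd.c entries above pair dasd_alloc_block(), which zero-allocates the block structure and initializes its counters, lists, locks, tasklet and timer before returning it, with dasd_free_block(), which simply frees it. A minimal user-space analogue of that allocate-and-initialize constructor/destructor pattern follows; pthread mutexes stand in for the kernel primitives and the field set is abbreviated.

/* Minimal user-space analogue of a zero-allocate-then-initialize constructor. */
#include <pthread.h>
#include <stdlib.h>

struct blockdev {
	int             open_count;
	pthread_mutex_t queue_lock;
	pthread_mutex_t format_lock;
};

static struct blockdev *blockdev_alloc(void)
{
	struct blockdev *block = calloc(1, sizeof(*block));

	if (!block)
		return NULL;
	block->open_count = -1;			/* mirrors the excerpt's -1 */
	pthread_mutex_init(&block->queue_lock, NULL);
	pthread_mutex_init(&block->format_lock, NULL);
	return block;
}

static void blockdev_free(struct blockdev *block)
{
	free(block);
}

int main(void)
{
	struct blockdev *b = blockdev_alloc();

	blockdev_free(b);
	return 0;
}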
block             212 drivers/s390/block/dasd.c 	if (device->block) {
block             213 drivers/s390/block/dasd.c 		rc = dasd_alloc_queue(device->block);
block             232 drivers/s390/block/dasd.c 	if (device->block)
block             233 drivers/s390/block/dasd.c 		dasd_free_queue(device->block);
block             258 drivers/s390/block/dasd.c 	struct dasd_block *block = device->block;
block             262 drivers/s390/block/dasd.c 	if (block) {
block             263 drivers/s390/block/dasd.c 		rc = dasd_gendisk_alloc(block);
block             266 drivers/s390/block/dasd.c 		block->debugfs_dentry =
block             267 drivers/s390/block/dasd.c 			dasd_debugfs_setup(block->gdp->disk_name,
block             269 drivers/s390/block/dasd.c 		dasd_profile_init(&block->profile, block->debugfs_dentry);
block             271 drivers/s390/block/dasd.c 			dasd_profile_on(&device->block->profile);
block             304 drivers/s390/block/dasd.c 	if (device->block) {
block             305 drivers/s390/block/dasd.c 		dasd_profile_exit(&device->block->profile);
block             306 drivers/s390/block/dasd.c 		debugfs_remove(device->block->debugfs_dentry);
block             307 drivers/s390/block/dasd.c 		dasd_gendisk_free(device->block);
block             308 drivers/s390/block/dasd.c 		dasd_block_clear_timer(device->block);
block             343 drivers/s390/block/dasd.c 	struct dasd_block *block;
block             347 drivers/s390/block/dasd.c 	block = device->block;
block             349 drivers/s390/block/dasd.c 	if (block) {
block             350 drivers/s390/block/dasd.c 		if (block->base->discipline->do_analysis != NULL)
block             351 drivers/s390/block/dasd.c 			rc = block->base->discipline->do_analysis(block);
block             355 drivers/s390/block/dasd.c 				disk = device->block->gdp;
block             363 drivers/s390/block/dasd.c 			device->discipline->setup_blk_queue(block);
block             364 drivers/s390/block/dasd.c 		set_capacity(block->gdp,
block             365 drivers/s390/block/dasd.c 			     block->blocks << block->s2b_shift);
block             367 drivers/s390/block/dasd.c 		rc = dasd_scan_partitions(block);
block             384 drivers/s390/block/dasd.c 	if (device->block)
block             386 drivers/s390/block/dasd.c 			list_empty(&device->block->ccw_queue);
block             401 drivers/s390/block/dasd.c 	if (device->block) {
block             402 drivers/s390/block/dasd.c 		struct dasd_block *block = device->block;
block             403 drivers/s390/block/dasd.c 		rc = dasd_flush_block_queue(block);
block             408 drivers/s390/block/dasd.c 		dasd_destroy_partitions(block);
block             409 drivers/s390/block/dasd.c 		block->blocks = 0;
block             410 drivers/s390/block/dasd.c 		block->bp_block = 0;
block             411 drivers/s390/block/dasd.c 		block->s2b_shift = 0;
block             438 drivers/s390/block/dasd.c 	if (device->block) {
block             439 drivers/s390/block/dasd.c 		dasd_schedule_block_bh(device->block);
block             441 drivers/s390/block/dasd.c 			disk = device->block->gdp;
block             445 drivers/s390/block/dasd.c 		disk = device->block->bdev->bd_disk;
block             471 drivers/s390/block/dasd.c 	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
block             472 drivers/s390/block/dasd.c 		disk = device->block->bdev->bd_disk;
block             703 drivers/s390/block/dasd.c static void dasd_profile_start(struct dasd_block *block,
block             713 drivers/s390/block/dasd.c 	if (dasd_global_profile_level || block->profile.data)
block             714 drivers/s390/block/dasd.c 		list_for_each(l, &block->ccw_queue)
block             726 drivers/s390/block/dasd.c 	spin_lock(&block->profile.lock);
block             727 drivers/s390/block/dasd.c 	if (block->profile.data) {
block             728 drivers/s390/block/dasd.c 		block->profile.data->dasd_io_nr_req[counter]++;
block             730 drivers/s390/block/dasd.c 			block->profile.data->dasd_read_nr_req[counter]++;
block             732 drivers/s390/block/dasd.c 	spin_unlock(&block->profile.lock);
block             813 drivers/s390/block/dasd.c static void dasd_profile_end(struct dasd_block *block,
block             826 drivers/s390/block/dasd.c 	      block->profile.data ||
block             858 drivers/s390/block/dasd.c 					  cqr->startdev != block->base,
block             868 drivers/s390/block/dasd.c 	spin_lock(&block->profile.lock);
block             869 drivers/s390/block/dasd.c 	if (block->profile.data) {
block             870 drivers/s390/block/dasd.c 		data = block->profile.data;
block             875 drivers/s390/block/dasd.c 		dasd_profile_end_add_data(block->profile.data,
block             876 drivers/s390/block/dasd.c 					  cqr->startdev != block->base,
block             884 drivers/s390/block/dasd.c 	spin_unlock(&block->profile.lock);
block             894 drivers/s390/block/dasd.c 					  cqr->startdev != block->base,
block            1155 drivers/s390/block/dasd.c #define dasd_profile_start(block, cqr, req) do {} while (0)
block            1156 drivers/s390/block/dasd.c #define dasd_profile_end(block, cqr, req) do {} while (0)
block            1439 drivers/s390/block/dasd.c 	if (((cqr->block &&
block            1440 drivers/s390/block/dasd.c 	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
block            1626 drivers/s390/block/dasd.c 	if (device->block) {
block            1627 drivers/s390/block/dasd.c 		dasd_schedule_block_bh(device->block);
block            1628 drivers/s390/block/dasd.c 		if (device->block->request_queue)
block            1629 drivers/s390/block/dasd.c 			blk_mq_run_hw_queues(device->block->request_queue,
block            1642 drivers/s390/block/dasd.c static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
block            1647 drivers/s390/block/dasd.c 	if (!block)
block            1649 drivers/s390/block/dasd.c 	device = block->base;
block            1782 drivers/s390/block/dasd.c 	if (dasd_ese_needs_format(cqr->block, irb)) {
block            1917 drivers/s390/block/dasd.c 	if (!ref_cqr->block)
block            1923 drivers/s390/block/dasd.c 		    ref_cqr->block == cqr->block) {
block            1992 drivers/s390/block/dasd.c 	struct dasd_block *block;
block            1997 drivers/s390/block/dasd.c 		block = cqr->block;
block            1998 drivers/s390/block/dasd.c 		if (!block) {
block            2001 drivers/s390/block/dasd.c 			spin_lock_bh(&block->queue_lock);
block            2003 drivers/s390/block/dasd.c 			spin_unlock_bh(&block->queue_lock);
block            2639 drivers/s390/block/dasd.c 	if (device->block)
block            2640 drivers/s390/block/dasd.c 		dasd_schedule_block_bh(device->block);
block            2707 drivers/s390/block/dasd.c 	struct dasd_block *block;
block            2709 drivers/s390/block/dasd.c 	block = from_timer(block, t, timer);
block            2710 drivers/s390/block/dasd.c 	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
block            2712 drivers/s390/block/dasd.c 	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
block            2713 drivers/s390/block/dasd.c 	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
block            2714 drivers/s390/block/dasd.c 	dasd_schedule_block_bh(block);
block            2715 drivers/s390/block/dasd.c 	blk_mq_run_hw_queues(block->request_queue, true);
block            2721 drivers/s390/block/dasd.c void dasd_block_set_timer(struct dasd_block *block, int expires)
block            2724 drivers/s390/block/dasd.c 		del_timer(&block->timer);
block            2726 drivers/s390/block/dasd.c 		mod_timer(&block->timer, jiffies + expires);
block            2733 drivers/s390/block/dasd.c void dasd_block_clear_timer(struct dasd_block *block)
block            2735 drivers/s390/block/dasd.c 	del_timer(&block->timer);
block            2763 drivers/s390/block/dasd.c 	dasd_profile_end(cqr->block, cqr, req);
block            2766 drivers/s390/block/dasd.c 	status = cqr->block->base->discipline->free_cp(cqr, req);
block            2814 drivers/s390/block/dasd.c static void __dasd_process_block_ccw_queue(struct dasd_block *block,
block            2821 drivers/s390/block/dasd.c 	struct dasd_device *base = block->base;
block            2825 drivers/s390/block/dasd.c 	list_for_each_safe(l, n, &block->ccw_queue) {
block            2880 drivers/s390/block/dasd.c 	dasd_schedule_block_bh(cqr->block);
block            2883 drivers/s390/block/dasd.c static void __dasd_block_start_head(struct dasd_block *block)
block            2887 drivers/s390/block/dasd.c 	if (list_empty(&block->ccw_queue))
block            2893 drivers/s390/block/dasd.c 	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
block            2896 drivers/s390/block/dasd.c 		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
block            2900 drivers/s390/block/dasd.c 			dasd_schedule_block_bh(block);
block            2904 drivers/s390/block/dasd.c 		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
block            2906 drivers/s390/block/dasd.c 		    (!dasd_eer_enabled(block->base))) {
block            2909 drivers/s390/block/dasd.c 			dasd_schedule_block_bh(block);
block            2913 drivers/s390/block/dasd.c 		if (block->base->stopped)
block            2918 drivers/s390/block/dasd.c 			cqr->startdev = block->base;
block            2934 drivers/s390/block/dasd.c 	struct dasd_block *block = (struct dasd_block *) data;
block            2940 drivers/s390/block/dasd.c 	atomic_set(&block->tasklet_scheduled, 0);
block            2942 drivers/s390/block/dasd.c 	spin_lock_irq(&block->queue_lock);
block            2944 drivers/s390/block/dasd.c 	__dasd_process_block_ccw_queue(block, &final_queue);
block            2945 drivers/s390/block/dasd.c 	spin_unlock_irq(&block->queue_lock);
block            2957 drivers/s390/block/dasd.c 	spin_lock_irq(&block->queue_lock);
block            2959 drivers/s390/block/dasd.c 	__dasd_block_start_head(block);
block            2960 drivers/s390/block/dasd.c 	spin_unlock_irq(&block->queue_lock);
block            2964 drivers/s390/block/dasd.c 	dasd_put_device(block->base);
block            2978 drivers/s390/block/dasd.c 	struct dasd_block *block = cqr->block;
block            2981 drivers/s390/block/dasd.c 	if (!block)
block            2996 drivers/s390/block/dasd.c static int dasd_flush_block_queue(struct dasd_block *block)
block            3004 drivers/s390/block/dasd.c 	spin_lock_bh(&block->queue_lock);
block            3007 drivers/s390/block/dasd.c 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
block            3025 drivers/s390/block/dasd.c 	spin_unlock_bh(&block->queue_lock);
block            3032 drivers/s390/block/dasd.c 			spin_lock_bh(&block->queue_lock);
block            3033 drivers/s390/block/dasd.c 			__dasd_process_erp(block->base, cqr);
block            3034 drivers/s390/block/dasd.c 			spin_unlock_bh(&block->queue_lock);
block            3052 drivers/s390/block/dasd.c void dasd_schedule_block_bh(struct dasd_block *block)
block            3055 drivers/s390/block/dasd.c 	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
block            3058 drivers/s390/block/dasd.c 	dasd_get_device(block->base);
block            3059 drivers/s390/block/dasd.c 	tasklet_hi_schedule(&block->tasklet);
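dasd_schedule_block_bh() in the entries above uses atomic_cmpxchg() on tasklet_scheduled so that only the caller that wins the 0 to 1 transition queues the tasklet, and the tasklet clears the flag again before processing the queue. A user-space sketch of that schedule-at-most-once guard using C11 atomics follows; run_tasklet() stands in for the real bottom half.

/* Minimal user-space sketch of a schedule-at-most-once guard. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int tasklet_scheduled;

static void run_tasklet(void)
{
	atomic_store(&tasklet_scheduled, 0);	/* allow the next schedule */
	printf("processing block ccw queue\n");
}

static void schedule_block_bh(void)
{
	int expected = 0;

	/* only the caller that wins the 0 -> 1 transition queues the work */
	if (!atomic_compare_exchange_strong(&tasklet_scheduled, &expected, 1))
		return;				/* already pending */
	printf("tasklet queued\n");		/* stands in for tasklet_hi_schedule() */
}

int main(void)
{
	schedule_block_bh();	/* queues */
	schedule_block_bh();	/* no-op: already pending */
	run_tasklet();		/* clears the flag, processes the queue */
	schedule_block_bh();	/* queues again */
	return 0;
}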
block            3075 drivers/s390/block/dasd.c 	struct dasd_block *block = hctx->queue->queuedata;
block            3082 drivers/s390/block/dasd.c 	basedev = block->base;
block            3120 drivers/s390/block/dasd.c 	cqr = basedev->discipline->build_cp(basedev, block, req);
block            3143 drivers/s390/block/dasd.c 	spin_lock(&block->queue_lock);
block            3144 drivers/s390/block/dasd.c 	list_add_tail(&cqr->blocklist, &block->ccw_queue);
block            3146 drivers/s390/block/dasd.c 	dasd_profile_start(block, cqr, req);
block            3147 drivers/s390/block/dasd.c 	dasd_schedule_block_bh(block);
block            3148 drivers/s390/block/dasd.c 	spin_unlock(&block->queue_lock);
block            3165 drivers/s390/block/dasd.c 	struct dasd_block *block = req->q->queuedata;
block            3176 drivers/s390/block/dasd.c 	device = cqr->startdev ? cqr->startdev : block->base;
block            3185 drivers/s390/block/dasd.c 	spin_lock(&block->queue_lock);
block            3198 drivers/s390/block/dasd.c 					 &block->ccw_queue, blocklist) {
block            3224 drivers/s390/block/dasd.c 	dasd_schedule_block_bh(block);
block            3225 drivers/s390/block/dasd.c 	spin_unlock(&block->queue_lock);
block            3268 drivers/s390/block/dasd.c static int dasd_alloc_queue(struct dasd_block *block)
block            3272 drivers/s390/block/dasd.c 	block->tag_set.ops = &dasd_mq_ops;
block            3273 drivers/s390/block/dasd.c 	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
block            3274 drivers/s390/block/dasd.c 	block->tag_set.nr_hw_queues = nr_hw_queues;
block            3275 drivers/s390/block/dasd.c 	block->tag_set.queue_depth = queue_depth;
block            3276 drivers/s390/block/dasd.c 	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
block            3277 drivers/s390/block/dasd.c 	block->tag_set.numa_node = NUMA_NO_NODE;
block            3279 drivers/s390/block/dasd.c 	rc = blk_mq_alloc_tag_set(&block->tag_set);
block            3283 drivers/s390/block/dasd.c 	block->request_queue = blk_mq_init_queue(&block->tag_set);
block            3284 drivers/s390/block/dasd.c 	if (IS_ERR(block->request_queue))
block            3285 drivers/s390/block/dasd.c 		return PTR_ERR(block->request_queue);
block            3287 drivers/s390/block/dasd.c 	block->request_queue->queuedata = block;
block            3295 drivers/s390/block/dasd.c static void dasd_free_queue(struct dasd_block *block)
block            3297 drivers/s390/block/dasd.c 	if (block->request_queue) {
block            3298 drivers/s390/block/dasd.c 		blk_cleanup_queue(block->request_queue);
block            3299 drivers/s390/block/dasd.c 		blk_mq_free_tag_set(&block->tag_set);
block            3300 drivers/s390/block/dasd.c 		block->request_queue = NULL;
block            3313 drivers/s390/block/dasd.c 	atomic_inc(&base->block->open_count);
block            3352 drivers/s390/block/dasd.c 	atomic_dec(&base->block->open_count);
block            3361 drivers/s390/block/dasd.c 		atomic_dec(&base->block->open_count);
block            3383 drivers/s390/block/dasd.c 	base->discipline->fill_geometry(base->block, geo);
block            3384 drivers/s390/block/dasd.c 	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
block            3517 drivers/s390/block/dasd.c 	struct dasd_block *block;
block            3540 drivers/s390/block/dasd.c 	block = device->block;
block            3546 drivers/s390/block/dasd.c 	if (block)
block            3547 drivers/s390/block/dasd.c 		dasd_free_block(block);
block            3625 drivers/s390/block/dasd.c 		if (device->block)
block            3626 drivers/s390/block/dasd.c 			dasd_free_block(device->block);
block            3642 drivers/s390/block/dasd.c 	struct dasd_block *block;
block            3660 drivers/s390/block/dasd.c 	if (device->block) {
block            3661 drivers/s390/block/dasd.c 		max_count = device->block->bdev ? 0 : -1;
block            3662 drivers/s390/block/dasd.c 		open_count = atomic_read(&device->block->open_count);
block            3707 drivers/s390/block/dasd.c 		if (device->block) {
block            3708 drivers/s390/block/dasd.c 			rc = fsync_bdev(device->block->bdev);
block            3736 drivers/s390/block/dasd.c 	block = device->block;
block            3742 drivers/s390/block/dasd.c 	if (block)
block            3743 drivers/s390/block/dasd.c 		dasd_free_block(block);
block            3797 drivers/s390/block/dasd.c 	if (device->block) {
block            3798 drivers/s390/block/dasd.c 		dasd_schedule_block_bh(device->block);
block            3799 drivers/s390/block/dasd.c 		if (device->block->request_queue)
block            3800 drivers/s390/block/dasd.c 			blk_mq_run_hw_queues(device->block->request_queue,
block            3947 drivers/s390/block/dasd.c 	if (device->block) {
block            3948 drivers/s390/block/dasd.c 		dasd_schedule_block_bh(device->block);
block            3949 drivers/s390/block/dasd.c 		if (device->block->request_queue)
block            3950 drivers/s390/block/dasd.c 			blk_mq_run_hw_queues(device->block->request_queue, true);
block            4017 drivers/s390/block/dasd.c 		cqr->block->base->discipline->free_cp(
block            4041 drivers/s390/block/dasd.c 	if (device->block)
block            4042 drivers/s390/block/dasd.c 		dasd_schedule_block_bh(device->block);
block            4102 drivers/s390/block/dasd.c 	if (device->block) {
block            4103 drivers/s390/block/dasd.c 		dasd_schedule_block_bh(device->block);
block            4104 drivers/s390/block/dasd.c 		if (device->block->request_queue)
block            4105 drivers/s390/block/dasd.c 			blk_mq_run_hw_queues(device->block->request_queue,
block            4208 drivers/s390/block/dasd.c 	if (device->block)
block            4209 drivers/s390/block/dasd.c 		dasd_schedule_block_bh(device->block);
block              84 drivers/s390/block/dasd_3990_erp.c 	if (erp->block)
block              85 drivers/s390/block/dasd_3990_erp.c 		dasd_block_set_timer(erp->block, expires);
block            1438 drivers/s390/block/dasd_3990_erp.c 	if (cqr->block &&
block            1439 drivers/s390/block/dasd_3990_erp.c 	    (cqr->block->base != cqr->startdev)) {
block            1463 drivers/s390/block/dasd_3990_erp.c 				    dev_name(&cqr->block->base->cdev->dev));
block            1466 drivers/s390/block/dasd_3990_erp.c 		erp->startdev = cqr->block->base;
block            2404 drivers/s390/block/dasd_3990_erp.c 			dasd_block_set_timer(device->block, (HZ << 3));
block            2436 drivers/s390/block/dasd_3990_erp.c 	erp->block    = cqr->block;
block             734 drivers/s390/block/dasd_alias.c 	cqr->block = NULL;
block             754 drivers/s390/block/dasd_alias.c 		dasd_schedule_block_bh(device->block);
block             761 drivers/s390/block/dasd_alias.c 		dasd_schedule_block_bh(device->block);
block             766 drivers/s390/block/dasd_alias.c 			dasd_schedule_block_bh(device->block);
block             795 drivers/s390/block/dasd_devmap.c 	if (!device->block || !device->block->gdp ||
block             801 drivers/s390/block/dasd_devmap.c 	atomic_inc(&device->block->open_count);
block             804 drivers/s390/block/dasd_devmap.c 	set_disk_ro(device->block->gdp, val);
block             805 drivers/s390/block/dasd_devmap.c 	atomic_dec(&device->block->open_count);
block            1333 drivers/s390/block/dasd_devmap.c 	if (IS_ERR(device) || !device->block)
block            1341 drivers/s390/block/dasd_devmap.c 	q = device->block->request_queue;
block             149 drivers/s390/block/dasd_diag.c 	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
block             225 drivers/s390/block/dasd_diag.c 	mdsk_init_io(device, device->block->bp_block, 0, NULL);
block             321 drivers/s390/block/dasd_diag.c 	struct dasd_block *block;
block             338 drivers/s390/block/dasd_diag.c 	block = dasd_alloc_block();
block             339 drivers/s390/block/dasd_diag.c 	if (IS_ERR(block)) {
block             344 drivers/s390/block/dasd_diag.c 		return PTR_ERR(block);
block             346 drivers/s390/block/dasd_diag.c 	device->block = block;
block             347 drivers/s390/block/dasd_diag.c 	block->base = device;
block             437 drivers/s390/block/dasd_diag.c 		block->blocks = (unsigned long) label->block_count;
block             439 drivers/s390/block/dasd_diag.c 		block->blocks = end_block;
block             440 drivers/s390/block/dasd_diag.c 	block->bp_block = bsize;
block             441 drivers/s390/block/dasd_diag.c 	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
block             443 drivers/s390/block/dasd_diag.c 		block->s2b_shift++;
block             444 drivers/s390/block/dasd_diag.c 	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
block             454 drivers/s390/block/dasd_diag.c 			(unsigned long) block->bp_block,
block             455 drivers/s390/block/dasd_diag.c 			(unsigned long) (block->blocks <<
block             456 drivers/s390/block/dasd_diag.c 					 block->s2b_shift) >> 1,
block             464 drivers/s390/block/dasd_diag.c 		device->block = NULL;
block             465 drivers/s390/block/dasd_diag.c 		dasd_free_block(block);
block             475 drivers/s390/block/dasd_diag.c dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
block             477 drivers/s390/block/dasd_diag.c 	if (dasd_check_blocksize(block->bp_block) != 0)
block             479 drivers/s390/block/dasd_diag.c 	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
block             481 drivers/s390/block/dasd_diag.c 	geo->sectors = 128 >> block->s2b_shift;
block             500 drivers/s390/block/dasd_diag.c 					       struct dasd_block *block,
block             520 drivers/s390/block/dasd_diag.c 	blksize = block->bp_block;
block             522 drivers/s390/block/dasd_diag.c 	first_rec = blk_rq_pos(req) >> block->s2b_shift;
block             524 drivers/s390/block/dasd_diag.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
block             531 drivers/s390/block/dasd_diag.c 		count += bv.bv_len >> (block->s2b_shift + 9);
block             563 drivers/s390/block/dasd_diag.c 	    block->base->features & DASD_FEATURE_FAILFAST)
block             567 drivers/s390/block/dasd_diag.c 	cqr->block = block;
block             621 drivers/s390/block/dasd_diag.c static void dasd_diag_setup_blk_queue(struct dasd_block *block)
block             623 drivers/s390/block/dasd_diag.c 	unsigned int logical_block_size = block->bp_block;
block             624 drivers/s390/block/dasd_diag.c 	struct request_queue *q = block->request_queue;
block             627 drivers/s390/block/dasd_diag.c 	max = DIAG_MAX_BLOCKS << block->s2b_shift;
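Throughout the dasd entries, s2b_shift records how many bit shifts convert 512-byte sectors into device blocks (computed by doubling 512 until it reaches bp_block), so request positions are translated with plain shifts. The short sketch below reproduces that arithmetic with illustrative sample values.

/* Minimal sketch of the sector-to-block shift arithmetic. */
#include <stdio.h>

int main(void)
{
	unsigned int bp_block = 4096;		/* device block size */
	unsigned int s2b_shift = 0;
	unsigned int sb;

	for (sb = 512; sb < bp_block; sb <<= 1)
		s2b_shift++;			/* bits to shift 512 to get a block */

	printf("bp_block=%u s2b_shift=%u\n", bp_block, s2b_shift);
	printf("sector 2048 -> block %u\n", 2048u >> s2b_shift);
	printf("100 blocks  -> %u sectors\n", 100u << s2b_shift);
	return 0;
}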
block             839 drivers/s390/block/dasd_eckd.c 	cqr->block = NULL;
block            1486 drivers/s390/block/dasd_eckd.c 	cqr->block = NULL;
block            1586 drivers/s390/block/dasd_eckd.c 	cqr->block = NULL;
block            1693 drivers/s390/block/dasd_eckd.c 	if (cqr->block)
block            1694 drivers/s390/block/dasd_eckd.c 		data->base = cqr->block->base;
block            1770 drivers/s390/block/dasd_eckd.c 	cqr->block = NULL;
block            1866 drivers/s390/block/dasd_eckd.c 	cqr->block = NULL;
block            1974 drivers/s390/block/dasd_eckd.c 	struct dasd_block *block;
block            2035 drivers/s390/block/dasd_eckd.c 		block = dasd_alloc_block();
block            2036 drivers/s390/block/dasd_eckd.c 		if (IS_ERR(block)) {
block            2040 drivers/s390/block/dasd_eckd.c 			rc = PTR_ERR(block);
block            2043 drivers/s390/block/dasd_eckd.c 		device->block = block;
block            2044 drivers/s390/block/dasd_eckd.c 		block->base = device;
block            2113 drivers/s390/block/dasd_eckd.c 	dasd_free_block(device->block);
block            2114 drivers/s390/block/dasd_eckd.c 	device->block = NULL;
block            2186 drivers/s390/block/dasd_eckd.c 	cqr->block = NULL;
block            2233 drivers/s390/block/dasd_eckd.c static int dasd_eckd_start_analysis(struct dasd_block *block)
block            2237 drivers/s390/block/dasd_eckd.c 	init_cqr = dasd_eckd_analysis_ccw(block->base);
block            2252 drivers/s390/block/dasd_eckd.c static int dasd_eckd_end_analysis(struct dasd_block *block)
block            2254 drivers/s390/block/dasd_eckd.c 	struct dasd_device *device = block->base;
block            2272 drivers/s390/block/dasd_eckd.c 		block->bp_block = DASD_RAW_BLOCKSIZE;
block            2274 drivers/s390/block/dasd_eckd.c 		block->s2b_shift = 3;
block            2325 drivers/s390/block/dasd_eckd.c 			block->bp_block = count_area->dl;
block            2327 drivers/s390/block/dasd_eckd.c 	if (block->bp_block == 0) {
block            2332 drivers/s390/block/dasd_eckd.c 	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
block            2333 drivers/s390/block/dasd_eckd.c 	for (sb = 512; sb < block->bp_block; sb = sb << 1)
block            2334 drivers/s390/block/dasd_eckd.c 		block->s2b_shift++;
block            2336 drivers/s390/block/dasd_eckd.c 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
block            2339 drivers/s390/block/dasd_eckd.c 	block->blocks = ((unsigned long) private->real_cyl *
block            2345 drivers/s390/block/dasd_eckd.c 		 "%s\n", (block->bp_block >> 10),
block            2348 drivers/s390/block/dasd_eckd.c 		   blk_per_trk * (block->bp_block >> 9)) >> 1),
block            2349 drivers/s390/block/dasd_eckd.c 		 ((blk_per_trk * block->bp_block) >> 10),
block            2356 drivers/s390/block/dasd_eckd.c static int dasd_eckd_do_analysis(struct dasd_block *block)
block            2358 drivers/s390/block/dasd_eckd.c 	struct dasd_eckd_private *private = block->base->private;
block            2361 drivers/s390/block/dasd_eckd.c 		return dasd_eckd_start_analysis(block);
block            2363 drivers/s390/block/dasd_eckd.c 		return dasd_eckd_end_analysis(block);
block            2387 drivers/s390/block/dasd_eckd.c dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
block            2389 drivers/s390/block/dasd_eckd.c 	struct dasd_eckd_private *private = block->base->private;
block            2391 drivers/s390/block/dasd_eckd.c 	if (dasd_check_blocksize(block->bp_block) == 0) {
block            2393 drivers/s390/block/dasd_eckd.c 					      0, block->bp_block);
block            2723 drivers/s390/block/dasd_eckd.c 			      base->block->bp_block);
block            3029 drivers/s390/block/dasd_eckd.c 				      struct dasd_block *block)
block            3035 drivers/s390/block/dasd_eckd.c 	spin_lock_irqsave(&block->format_lock, flags);
block            3036 drivers/s390/block/dasd_eckd.c 	list_for_each_entry(format, &block->format_list, list) {
block            3042 drivers/s390/block/dasd_eckd.c 	list_add_tail(&to_format->list, &block->format_list);
block            3045 drivers/s390/block/dasd_eckd.c 	spin_unlock_irqrestore(&block->format_lock, flags);
block            3050 drivers/s390/block/dasd_eckd.c 			      struct dasd_block *block)
block            3054 drivers/s390/block/dasd_eckd.c 	spin_lock_irqsave(&block->format_lock, flags);
block            3056 drivers/s390/block/dasd_eckd.c 	spin_unlock_irqrestore(&block->format_lock, flags);
block            3068 drivers/s390/block/dasd_eckd.c 	clear_format_track(format, cqr->basedev->block);
block            3083 drivers/s390/block/dasd_eckd.c 	struct dasd_block *block;
block            3092 drivers/s390/block/dasd_eckd.c 	block = cqr->block;
block            3093 drivers/s390/block/dasd_eckd.c 	base = block->base;
block            3095 drivers/s390/block/dasd_eckd.c 	blksize = block->bp_block;
block            3099 drivers/s390/block/dasd_eckd.c 	first_trk = blk_rq_pos(req) >> block->s2b_shift;
block            3102 drivers/s390/block/dasd_eckd.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
block            3116 drivers/s390/block/dasd_eckd.c 	if (test_and_set_format_track(format, block))
block            3163 drivers/s390/block/dasd_eckd.c 	struct dasd_block *block;
block            3174 drivers/s390/block/dasd_eckd.c 	base = cqr->block->base;
block            3175 drivers/s390/block/dasd_eckd.c 	blksize = base->block->bp_block;
block            3176 drivers/s390/block/dasd_eckd.c 	block =  cqr->block;
block            3182 drivers/s390/block/dasd_eckd.c 	first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
block            3185 drivers/s390/block/dasd_eckd.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
block            3452 drivers/s390/block/dasd_eckd.c 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
block            3454 drivers/s390/block/dasd_eckd.c 		cqr->startdev = cqr->block->base;
block            3455 drivers/s390/block/dasd_eckd.c 		cqr->lpm = dasd_path_get_opm(cqr->block->base);
block            3499 drivers/s390/block/dasd_eckd.c 		if (!device->block && private->lcu &&
block            3551 drivers/s390/block/dasd_eckd.c 	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
block            3630 drivers/s390/block/dasd_eckd.c dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
block            3721 drivers/s390/block/dasd_eckd.c 	cqr->block = block;
block            3750 drivers/s390/block/dasd_eckd.c 	struct dasd_block *block = device->block;
block            3789 drivers/s390/block/dasd_eckd.c 			spin_lock_irq(&block->queue_lock);
block            3791 drivers/s390/block/dasd_eckd.c 			spin_unlock_irq(&block->queue_lock);
block            3802 drivers/s390/block/dasd_eckd.c 			spin_lock_irq(&block->queue_lock);
block            3804 drivers/s390/block/dasd_eckd.c 			spin_unlock_irq(&block->queue_lock);
block            3828 drivers/s390/block/dasd_eckd.c 					       struct dasd_block *block,
block            3854 drivers/s390/block/dasd_eckd.c 	basedev = block->base;
block            3870 drivers/s390/block/dasd_eckd.c 		count += bv.bv_len >> (block->s2b_shift + 9);
block            3872 drivers/s390/block/dasd_eckd.c 			cidaw += bv.bv_len >> (block->s2b_shift + 9);
block            3997 drivers/s390/block/dasd_eckd.c 	    block->base->features & DASD_FEATURE_FAILFAST)
block            4001 drivers/s390/block/dasd_eckd.c 	cqr->block = block;
block            4020 drivers/s390/block/dasd_eckd.c 					       struct dasd_block *block,
block            4048 drivers/s390/block/dasd_eckd.c 	basedev = block->base;
block            4176 drivers/s390/block/dasd_eckd.c 	    block->base->features & DASD_FEATURE_FAILFAST)
block            4180 drivers/s390/block/dasd_eckd.c 	cqr->block = block;
block            4352 drivers/s390/block/dasd_eckd.c 					       struct dasd_block *block,
block            4383 drivers/s390/block/dasd_eckd.c 	basedev = block->base;
block            4499 drivers/s390/block/dasd_eckd.c 	    block->base->features & DASD_FEATURE_FAILFAST)
block            4504 drivers/s390/block/dasd_eckd.c 	cqr->block = block;
block            4525 drivers/s390/block/dasd_eckd.c 					       struct dasd_block *block,
block            4541 drivers/s390/block/dasd_eckd.c 	basedev = block->base;
block            4545 drivers/s390/block/dasd_eckd.c 	blksize = block->bp_block;
block            4550 drivers/s390/block/dasd_eckd.c 	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
block            4553 drivers/s390/block/dasd_eckd.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
block            4575 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
block            4586 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
block            4596 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
block            4605 drivers/s390/block/dasd_eckd.c 						   struct dasd_block *block,
block            4637 drivers/s390/block/dasd_eckd.c 	basedev = block->base;
block            4743 drivers/s390/block/dasd_eckd.c 	    block->base->features & DASD_FEATURE_FAILFAST)
block            4747 drivers/s390/block/dasd_eckd.c 	cqr->block = block;
block            4772 drivers/s390/block/dasd_eckd.c 	private = cqr->block->base->private;
block            4773 drivers/s390/block/dasd_eckd.c 	blksize = cqr->block->bp_block;
block            4775 drivers/s390/block/dasd_eckd.c 	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
block            4845 drivers/s390/block/dasd_eckd.c 						     struct dasd_block *block,
block            4863 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
block            4865 drivers/s390/block/dasd_eckd.c 		cqr = dasd_eckd_build_cp(startdev, block, req);
block            5364 drivers/s390/block/dasd_eckd.c dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
block            5366 drivers/s390/block/dasd_eckd.c 	struct dasd_device *device = block->base;
block            5886 drivers/s390/block/dasd_eckd.c 	cqr->block = NULL;
block            5955 drivers/s390/block/dasd_eckd.c 	if (!device->block && private->lcu->pav == HYPER_PAV)
block            5979 drivers/s390/block/dasd_eckd.c 	cqr->block = NULL;
block            6146 drivers/s390/block/dasd_eckd.c 	cqr->block = NULL;
block            6629 drivers/s390/block/dasd_eckd.c static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
block            6631 drivers/s390/block/dasd_eckd.c 	unsigned int logical_block_size = block->bp_block;
block            6632 drivers/s390/block/dasd_eckd.c 	struct request_queue *q = block->request_queue;
block            6633 drivers/s390/block/dasd_eckd.c 	struct dasd_device *device = block->base;
block            6644 drivers/s390/block/dasd_eckd.c 		max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
block            6646 drivers/s390/block/dasd_eckd.c 		max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
block             131 drivers/s390/block/dasd_fba.c 	struct dasd_block *block;
block             146 drivers/s390/block/dasd_fba.c 	block = dasd_alloc_block();
block             147 drivers/s390/block/dasd_fba.c 	if (IS_ERR(block)) {
block             152 drivers/s390/block/dasd_fba.c 		return PTR_ERR(block);
block             154 drivers/s390/block/dasd_fba.c 	device->block = block;
block             155 drivers/s390/block/dasd_fba.c 	block->base = device;
block             163 drivers/s390/block/dasd_fba.c 		device->block = NULL;
block             164 drivers/s390/block/dasd_fba.c 		dasd_free_block(block);
block             195 drivers/s390/block/dasd_fba.c static int dasd_fba_do_analysis(struct dasd_block *block)
block             197 drivers/s390/block/dasd_fba.c 	struct dasd_fba_private *private = block->base->private;
block             202 drivers/s390/block/dasd_fba.c 		DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
block             206 drivers/s390/block/dasd_fba.c 	block->blocks = private->rdc_data.blk_bdsa;
block             207 drivers/s390/block/dasd_fba.c 	block->bp_block = private->rdc_data.blk_size;
block             208 drivers/s390/block/dasd_fba.c 	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
block             210 drivers/s390/block/dasd_fba.c 		block->s2b_shift++;
block             214 drivers/s390/block/dasd_fba.c static int dasd_fba_fill_geometry(struct dasd_block *block,
block             217 drivers/s390/block/dasd_fba.c 	if (dasd_check_blocksize(block->bp_block) != 0)
block             219 drivers/s390/block/dasd_fba.c 	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
block             221 drivers/s390/block/dasd_fba.c 	geo->sectors = 128 >> block->s2b_shift;
block             326 drivers/s390/block/dasd_fba.c 						struct dasd_block *block,
block             336 drivers/s390/block/dasd_fba.c 	unsigned int blksize = block->bp_block;
block             346 drivers/s390/block/dasd_fba.c 	first_rec = blk_rq_pos(req) >> block->s2b_shift;
block             348 drivers/s390/block/dasd_fba.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
block             423 drivers/s390/block/dasd_fba.c 	    block->base->features & DASD_FEATURE_FAILFAST)
block             428 drivers/s390/block/dasd_fba.c 	cqr->block = block;
block             439 drivers/s390/block/dasd_fba.c 						struct dasd_block *block,
block             442 drivers/s390/block/dasd_fba.c 	struct dasd_fba_private *private = block->base->private;
block             461 drivers/s390/block/dasd_fba.c 	blksize = block->bp_block;
block             463 drivers/s390/block/dasd_fba.c 	first_rec = blk_rq_pos(req) >> block->s2b_shift;
block             465 drivers/s390/block/dasd_fba.c 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
block             473 drivers/s390/block/dasd_fba.c 		count += bv.bv_len >> (block->s2b_shift + 9);
block             501 drivers/s390/block/dasd_fba.c 		      block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
block             537 drivers/s390/block/dasd_fba.c 			ccw->count = block->bp_block;
block             552 drivers/s390/block/dasd_fba.c 	    block->base->features & DASD_FEATURE_FAILFAST)
block             556 drivers/s390/block/dasd_fba.c 	cqr->block = block;
block             565 drivers/s390/block/dasd_fba.c 					      struct dasd_block *block,
block             569 drivers/s390/block/dasd_fba.c 		return dasd_fba_build_cp_discard(memdev, block, req);
block             571 drivers/s390/block/dasd_fba.c 		return dasd_fba_build_cp_regular(memdev, block, req);
block             577 drivers/s390/block/dasd_fba.c 	struct dasd_fba_private *private = cqr->block->base->private;
block             587 drivers/s390/block/dasd_fba.c 	blksize = cqr->block->bp_block;
block             775 drivers/s390/block/dasd_fba.c static void dasd_fba_setup_blk_queue(struct dasd_block *block)
block             777 drivers/s390/block/dasd_fba.c 	unsigned int logical_block_size = block->bp_block;
block             778 drivers/s390/block/dasd_fba.c 	struct request_queue *q = block->request_queue;
block             782 drivers/s390/block/dasd_fba.c 	max = DASD_FBA_MAX_BLOCKS << block->s2b_shift;
block              30 drivers/s390/block/dasd_genhd.c int dasd_gendisk_alloc(struct dasd_block *block)
block              37 drivers/s390/block/dasd_genhd.c 	base = block->base;
block              76 drivers/s390/block/dasd_genhd.c 	gdp->queue = block->request_queue;
block              77 drivers/s390/block/dasd_genhd.c 	block->gdp = gdp;
block              78 drivers/s390/block/dasd_genhd.c 	set_capacity(block->gdp, 0);
block              79 drivers/s390/block/dasd_genhd.c 	device_add_disk(&base->cdev->dev, block->gdp, NULL);
block              86 drivers/s390/block/dasd_genhd.c void dasd_gendisk_free(struct dasd_block *block)
block              88 drivers/s390/block/dasd_genhd.c 	if (block->gdp) {
block              89 drivers/s390/block/dasd_genhd.c 		del_gendisk(block->gdp);
block              90 drivers/s390/block/dasd_genhd.c 		block->gdp->private_data = NULL;
block              91 drivers/s390/block/dasd_genhd.c 		put_disk(block->gdp);
block              92 drivers/s390/block/dasd_genhd.c 		block->gdp = NULL;
block              99 drivers/s390/block/dasd_genhd.c int dasd_scan_partitions(struct dasd_block *block)
block             104 drivers/s390/block/dasd_genhd.c 	bdev = bdget_disk(block->gdp, 0);
block             106 drivers/s390/block/dasd_genhd.c 		DBF_DEV_EVENT(DBF_ERR, block->base, "%s",
block             113 drivers/s390/block/dasd_genhd.c 		DBF_DEV_EVENT(DBF_ERR, block->base,
block             121 drivers/s390/block/dasd_genhd.c 		DBF_DEV_EVENT(DBF_ERR, block->base,
block             134 drivers/s390/block/dasd_genhd.c 	block->bdev = bdev;
block             142 drivers/s390/block/dasd_genhd.c void dasd_destroy_partitions(struct dasd_block *block)
block             153 drivers/s390/block/dasd_genhd.c 	bdev = block->bdev;
block             154 drivers/s390/block/dasd_genhd.c 	block->bdev = NULL;
block             165 drivers/s390/block/dasd_genhd.c 	for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
block             168 drivers/s390/block/dasd_genhd.c 	invalidate_partition(block->gdp, 0);
block             171 drivers/s390/block/dasd_genhd.c 	set_capacity(block->gdp, 0);
block             164 drivers/s390/block/dasd_int.h 	struct dasd_block *block;	/* the originating block device */
block             486 drivers/s390/block/dasd_int.h 	struct dasd_block *block;
block              59 drivers/s390/block/dasd_ioctl.c 		     (loff_t)get_capacity(base->block->gdp) << 9);
block             103 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_quiesce(struct dasd_block *block)
block             108 drivers/s390/block/dasd_ioctl.c 	base = block->base;
block             124 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_resume(struct dasd_block *block)
block             129 drivers/s390/block/dasd_ioctl.c 	base = block->base;
block             139 drivers/s390/block/dasd_ioctl.c 	dasd_schedule_block_bh(block);
block             146 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_abortio(struct dasd_block *block)
block             152 drivers/s390/block/dasd_ioctl.c 	base = block->base;
block             160 drivers/s390/block/dasd_ioctl.c 	spin_lock_irqsave(&block->request_queue_lock, flags);
block             161 drivers/s390/block/dasd_ioctl.c 	spin_lock(&block->queue_lock);
block             162 drivers/s390/block/dasd_ioctl.c 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
block             167 drivers/s390/block/dasd_ioctl.c 			spin_unlock(&block->queue_lock);
block             169 drivers/s390/block/dasd_ioctl.c 			spin_lock(&block->queue_lock);
block             172 drivers/s390/block/dasd_ioctl.c 	spin_unlock(&block->queue_lock);
block             173 drivers/s390/block/dasd_ioctl.c 	spin_unlock_irqrestore(&block->request_queue_lock, flags);
block             175 drivers/s390/block/dasd_ioctl.c 	dasd_schedule_block_bh(block);
block             182 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_allowio(struct dasd_block *block)
block             186 drivers/s390/block/dasd_ioctl.c 	base = block->base;
block             203 drivers/s390/block/dasd_ioctl.c dasd_format(struct dasd_block *block, struct format_data_t *fdata)
block             208 drivers/s390/block/dasd_ioctl.c 	base = block->base;
block             229 drivers/s390/block/dasd_ioctl.c 		struct block_device *bdev = bdget_disk(block->gdp, 0);
block             241 drivers/s390/block/dasd_ioctl.c static int dasd_check_format(struct dasd_block *block,
block             247 drivers/s390/block/dasd_ioctl.c 	base = block->base;
block             290 drivers/s390/block/dasd_ioctl.c 	rc = dasd_format(base->block, &fdata);
block             323 drivers/s390/block/dasd_ioctl.c 	rc = dasd_check_format(base->block, &cdata);
block             393 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_reset_profile(struct dasd_block *block)
block             395 drivers/s390/block/dasd_ioctl.c 	dasd_profile_reset(&block->profile);
block             402 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
block             411 drivers/s390/block/dasd_ioctl.c 	spin_lock_bh(&block->profile.lock);
block             412 drivers/s390/block/dasd_ioctl.c 	if (block->profile.data) {
block             413 drivers/s390/block/dasd_ioctl.c 		data->dasd_io_reqs = block->profile.data->dasd_io_reqs;
block             414 drivers/s390/block/dasd_ioctl.c 		data->dasd_io_sects = block->profile.data->dasd_io_sects;
block             415 drivers/s390/block/dasd_ioctl.c 		memcpy(data->dasd_io_secs, block->profile.data->dasd_io_secs,
block             417 drivers/s390/block/dasd_ioctl.c 		memcpy(data->dasd_io_times, block->profile.data->dasd_io_times,
block             419 drivers/s390/block/dasd_ioctl.c 		memcpy(data->dasd_io_timps, block->profile.data->dasd_io_timps,
block             421 drivers/s390/block/dasd_ioctl.c 		memcpy(data->dasd_io_time1, block->profile.data->dasd_io_time1,
block             423 drivers/s390/block/dasd_ioctl.c 		memcpy(data->dasd_io_time2, block->profile.data->dasd_io_time2,
block             426 drivers/s390/block/dasd_ioctl.c 		       block->profile.data->dasd_io_time2ps,
block             428 drivers/s390/block/dasd_ioctl.c 		memcpy(data->dasd_io_time3, block->profile.data->dasd_io_time3,
block             431 drivers/s390/block/dasd_ioctl.c 		       block->profile.data->dasd_io_nr_req,
block             433 drivers/s390/block/dasd_ioctl.c 		spin_unlock_bh(&block->profile.lock);
block             435 drivers/s390/block/dasd_ioctl.c 		spin_unlock_bh(&block->profile.lock);
block             446 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_reset_profile(struct dasd_block *block)
block             451 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
block             460 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_information(struct dasd_block *block,
block             472 drivers/s390/block/dasd_ioctl.c 	base = block->base;
block             502 drivers/s390/block/dasd_ioctl.c 	dasd_info->open_count = atomic_read(&block->open_count);
block             503 drivers/s390/block/dasd_ioctl.c 	if (!block->bdev)
block             511 drivers/s390/block/dasd_ioctl.c 	    (dasd_check_blocksize(block->bp_block)))
block             519 drivers/s390/block/dasd_ioctl.c 	spin_lock_irqsave(&block->queue_lock, flags);
block             522 drivers/s390/block/dasd_ioctl.c 	spin_unlock_irqrestore(&block->queue_lock, flags);
block             563 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
block             570 drivers/s390/block/dasd_ioctl.c 	ret = cmf_readall(block->base->cdev, &data);
block             579 drivers/s390/block/dasd_ioctl.c 	struct dasd_block *block;
block             597 drivers/s390/block/dasd_ioctl.c 	block = base->block;
block             607 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_quiesce(block);
block             610 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_resume(block);
block             613 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_abortio(block);
block             616 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_allowio(block);
block             625 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_information(block, cmd, argp);
block             628 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_information(block, cmd, argp);
block             631 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_read_profile(block, argp);
block             634 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_reset_profile(block);
block             649 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_readall_cmb(block, cmd, argp);
block             658 drivers/s390/block/dasd_ioctl.c 			rc = base->discipline->ioctl(block, cmd, argp);
block              39 drivers/s390/block/dasd_proc.c 	struct dasd_block *block;
block              45 drivers/s390/block/dasd_proc.c 	if (device->block)
block              46 drivers/s390/block/dasd_proc.c 		block = device->block;
block              59 drivers/s390/block/dasd_proc.c 	if (block->gdp)
block              61 drivers/s390/block/dasd_proc.c 			   MAJOR(disk_devt(block->gdp)),
block              62 drivers/s390/block/dasd_proc.c 			   MINOR(disk_devt(block->gdp)));
block              66 drivers/s390/block/dasd_proc.c 	if (block->gdp)
block              67 drivers/s390/block/dasd_proc.c 		seq_printf(m, " is %-8s", block->gdp->disk_name);
block              90 drivers/s390/block/dasd_proc.c 		if (dasd_check_blocksize(block->bp_block))
block              95 drivers/s390/block/dasd_proc.c 				   block->bp_block, block->blocks,
block              96 drivers/s390/block/dasd_proc.c 				   ((block->bp_block >> 9) *
block              97 drivers/s390/block/dasd_proc.c 				    block->blocks) >> 11);
block             145 drivers/s390/block/dasd_proc.c 		if (device->block)
block             146 drivers/s390/block/dasd_proc.c 			rc = dasd_profile_on(&device->block->profile);
block             163 drivers/s390/block/dasd_proc.c 		if (device->block)
block             164 drivers/s390/block/dasd_proc.c 			dasd_profile_off(&device->block->profile);
block             178 drivers/s390/block/dasd_proc.c 		if (device->block)
block             179 drivers/s390/block/dasd_proc.c 			dasd_profile_reset(&device->block->profile);
block              39 drivers/s390/char/tape_34xx.c 	unsigned int	block		: 22;
block             932 drivers/s390/char/tape_34xx.c 			if (bid.block < sbid->bid.block)
block             939 drivers/s390/char/tape_34xx.c 		if (bid.block < sbid->bid.block) {
block             954 drivers/s390/char/tape_34xx.c 			sbid->bid.block
block             977 drivers/s390/char/tape_34xx.c 		if (sbid->bid.block >= from) {
block             981 drivers/s390/char/tape_34xx.c 				sbid->bid.block
block            1013 drivers/s390/char/tape_34xx.c 		if (sbid->bid.block >= bid->block)
block            1023 drivers/s390/char/tape_34xx.c 			sbid_to_use->bid.block,
block            1024 drivers/s390/char/tape_34xx.c 			bid->block
block            1080 drivers/s390/char/tape_34xx.c 	return block_id.cbid.block;
block            1105 drivers/s390/char/tape_34xx.c 	bid->block  = mt_count;
block            1240 drivers/scsi/aacraid/aachba.c 		readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
block            1241 drivers/scsi/aacraid/aachba.c 		readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
block            1280 drivers/scsi/aacraid/aachba.c 	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
block            1315 drivers/scsi/aacraid/aachba.c 	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
block            1371 drivers/scsi/aacraid/aachba.c 		writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
block            1372 drivers/scsi/aacraid/aachba.c 		writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
block            1414 drivers/scsi/aacraid/aachba.c 	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
block            1449 drivers/scsi/aacraid/aachba.c 	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
block            1855 drivers/scsi/aacraid/aacraid.h 	__le32		block;
block            1865 drivers/scsi/aacraid/aacraid.h 	__le32		block;
block            1881 drivers/scsi/aacraid/aacraid.h 	__le32		block;
block            1892 drivers/scsi/aacraid/aacraid.h 	__le32		block;
block            1906 drivers/scsi/aacraid/aacraid.h 	__le32		block[2];
block            4721 drivers/scsi/bfa/bfa_ioc.c 	bfa_trc(diag, diag->block);
block            4742 drivers/scsi/bfa/bfa_ioc.c 		if (diag->block) {
block            4750 drivers/scsi/bfa/bfa_ioc.c 			diag->block = 0;
block            4806 drivers/scsi/bfa/bfa_ioc.c 	diag->block = 0;
block            5078 drivers/scsi/bfa/bfa_ioc.c 	if (diag->block) {
block            5079 drivers/scsi/bfa/bfa_ioc.c 		bfa_trc(diag, diag->block);
block            5082 drivers/scsi/bfa/bfa_ioc.c 		diag->block = 1;
block            5127 drivers/scsi/bfa/bfa_ioc.c 	if (diag->block || diag->fwping.lock) {
block            5128 drivers/scsi/bfa/bfa_ioc.c 		bfa_trc(diag, diag->block);
block            5166 drivers/scsi/bfa/bfa_ioc.c 	if (diag->block || diag->tsensor.lock) {
block            5167 drivers/scsi/bfa/bfa_ioc.c 		bfa_trc(diag, diag->block);
block            5275 drivers/scsi/bfa/bfa_ioc.c 	diag->block = 0;
block             620 drivers/scsi/bfa/bfa_ioc.h 	u8		block;
block            4659 drivers/scsi/hpsa.c 	u32 block;
block            4671 drivers/scsi/hpsa.c 			block = (((cdb[1] & 0x1F) << 16) |
block            4679 drivers/scsi/hpsa.c 			block = get_unaligned_be32(&cdb[2]);
block            4687 drivers/scsi/hpsa.c 		cdb[2] = (u8) (block >> 24);
block            4688 drivers/scsi/hpsa.c 		cdb[3] = (u8) (block >> 16);
block            4689 drivers/scsi/hpsa.c 		cdb[4] = (u8) (block >> 8);
block            4690 drivers/scsi/hpsa.c 		cdb[5] = (u8) (block);
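
The hpsa.c entries above rewrite a 6-byte READ/WRITE CDB into a 10-byte one: the 21-bit LBA comes from the low five bits of byte 1 plus bytes 2-3 (larger CDBs use get_unaligned_be32), and the result is stored back big-endian into cdb[2..5]. A minimal user-space sketch of that byte packing, using plain arrays instead of the driver's structures; the helper names below are mine, not hpsa's:

    #include <stdint.h>
    #include <stdio.h>

    /* Extract the 21-bit LBA of a 6-byte CDB: low 5 bits of byte 1, then bytes 2-3. */
    static uint32_t lba_from_cdb6(const uint8_t *cdb)
    {
        return ((uint32_t)(cdb[1] & 0x1F) << 16) |
               ((uint32_t)cdb[2] << 8) |
               (uint32_t)cdb[3];
    }

    /* Store an LBA big-endian into bytes 2..5 of a 10-byte CDB. */
    static void lba_to_cdb10(uint8_t *cdb, uint32_t block)
    {
        cdb[2] = (uint8_t)(block >> 24);
        cdb[3] = (uint8_t)(block >> 16);
        cdb[4] = (uint8_t)(block >> 8);
        cdb[5] = (uint8_t)block;
    }

    int main(void)
    {
        uint8_t cdb6[6] = { 0x08, 0x12, 0x34, 0x56, 0x01, 0x00 };
        uint8_t cdb10[10] = { 0x28 };
        uint32_t block = lba_from_cdb6(cdb6);

        lba_to_cdb10(cdb10, block);
        printf("lba=0x%06x -> %02x %02x %02x %02x\n",
               block, cdb10[2], cdb10[3], cdb10[4], cdb10[5]);
        return 0;
    }

With the sample CDB this prints lba=0x123456 -> 00 12 34 56.
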
block            1362 drivers/scsi/megaraid/megaraid_sas_fp.c 	u64 block = io_info->ldStartBlock;
block            1391 drivers/scsi/megaraid/megaraid_sas_fp.c 		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
block            1392 drivers/scsi/megaraid/megaraid_sas_fp.c 		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
block            1408 drivers/scsi/megaraid/megaraid_sas_fp.c 	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
block             454 drivers/scsi/mpt3sas/mpt3sas_base.h 	u8	block;
block            3295 drivers/scsi/mpt3sas/mpt3sas_scsih.c 	sas_device_priv_data->block = 1;
block            3320 drivers/scsi/mpt3sas/mpt3sas_scsih.c 	sas_device_priv_data->block = 0;
block            3332 drivers/scsi/mpt3sas/mpt3sas_scsih.c 		sas_device_priv_data->block = 1;
block            3339 drivers/scsi/mpt3sas/mpt3sas_scsih.c 		sas_device_priv_data->block = 0;
block            3364 drivers/scsi/mpt3sas/mpt3sas_scsih.c 		if (!sas_device_priv_data->block)
block            3395 drivers/scsi/mpt3sas/mpt3sas_scsih.c 		if (sas_device_priv_data->block)
block            3417 drivers/scsi/mpt3sas/mpt3sas_scsih.c 		if (sas_device_priv_data->block)
block            3451 drivers/scsi/mpt3sas/mpt3sas_scsih.c 		if (sas_device_priv_data->block)
block            4690 drivers/scsi/mpt3sas/mpt3sas_scsih.c 		   sas_device_priv_data->block) {
block            5326 drivers/scsi/mpt3sas/mpt3sas_scsih.c 		if (sas_device_priv_data->block) {
block            1069 drivers/scsi/mvsas/mv_94xx.c 			u32 block = ioread32be(regs + MVS_SGPIO_DCTRL +
block            1085 drivers/scsi/mvsas/mv_94xx.c 				block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT)
block            1088 drivers/scsi/mvsas/mv_94xx.c 				block |= LED_BLINKA_SOF << (
block            1093 drivers/scsi/mvsas/mv_94xx.c 				block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT)
block            1095 drivers/scsi/mvsas/mv_94xx.c 				block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT;
block            1098 drivers/scsi/mvsas/mv_94xx.c 				block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT)
block            1100 drivers/scsi/mvsas/mv_94xx.c 				block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT;
block            1104 drivers/scsi/mvsas/mv_94xx.c 			iowrite32be(block,
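
The mv_94xx.c lines read a 32-bit SGPIO data word, clear one LED field with a mask shifted to the drive's position, OR in the new pattern, and write the word back. A stand-alone sketch of that read-modify-write step; the field width, shift values and names below are illustrative stand-ins, not the driver's register layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout: a 3-bit "activity" field at a caller-chosen shift. */
    #define ACT_FIELD_MASK 0x7u

    /* Replace one 3-bit field inside a packed 32-bit register value. */
    static uint32_t set_act_field(uint32_t block, unsigned int shift, uint32_t pattern)
    {
        block &= ~(ACT_FIELD_MASK << shift);          /* clear the old field */
        block |= (pattern & ACT_FIELD_MASK) << shift; /* install the new one */
        return block;
    }

    int main(void)
    {
        uint32_t reg = 0xFFFFFFFFu;

        reg = set_act_field(reg, 8, 0x5);
        printf("reg = 0x%08x\n", reg); /* bits 10..8 now hold 101 */
        return 0;
    }
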
block             915 drivers/scsi/qedi/qedi_main.c static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
block             920 drivers/scsi/qedi/qedi_main.c 	ipv6_en = !!(block->generic.ctrl_flags &
block             924 drivers/scsi/qedi/qedi_main.c 		 block->target[index].target_name.byte);
block             930 drivers/scsi/qedi/qedi_main.c 			 block->target[index].ipv6_addr.byte);
block             933 drivers/scsi/qedi/qedi_main.c 			 block->target[index].ipv4_addr.byte);
block             938 drivers/scsi/qedi/qedi_main.c 			       struct nvm_iscsi_block *block)
block             950 drivers/scsi/qedi/qedi_main.c 	pri_ctrl_flags = !!(block->target[0].ctrl_flags &
block             956 drivers/scsi/qedi/qedi_main.c 		qedi_get_boot_tgt_info(block, pri_tgt, 0);
block             959 drivers/scsi/qedi/qedi_main.c 	sec_ctrl_flags = !!(block->target[1].ctrl_flags &
block             967 drivers/scsi/qedi/qedi_main.c 		qedi_get_boot_tgt_info(block, sec_tgt, 1);
block            1058 drivers/scsi/qedi/qedi_main.c 	struct nvm_iscsi_block *block = NULL;
block            1085 drivers/scsi/qedi/qedi_main.c 	block = qedi_get_nvram_block(qedi);
block            1086 drivers/scsi/qedi/qedi_main.c 	if (block) {
block            1087 drivers/scsi/qedi/qedi_main.c 		chap_en = !!(block->generic.ctrl_flags &
block            1089 drivers/scsi/qedi/qedi_main.c 		mchap_en = !!(block->generic.ctrl_flags &
block            1105 drivers/scsi/qedi/qedi_main.c 		rval = qedi_find_boot_info(qedi, iscsi, block);
block            1960 drivers/scsi/qedi/qedi_main.c 	struct nvm_iscsi_block *block;
block            1963 drivers/scsi/qedi/qedi_main.c 	block = &qedi->iscsi_image->iscsi_cfg.block[0];
block            1964 drivers/scsi/qedi/qedi_main.c 	for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
block            1965 drivers/scsi/qedi/qedi_main.c 		flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
block            1969 drivers/scsi/qedi/qedi_main.c 			(pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK)
block            1971 drivers/scsi/qedi/qedi_main.c 			return block;
block            1982 drivers/scsi/qedi/qedi_main.c 	struct nvm_iscsi_block *block;
block            1985 drivers/scsi/qedi/qedi_main.c 	block = qedi_get_nvram_block(qedi);
block            1986 drivers/scsi/qedi/qedi_main.c 	if (!block)
block            1989 drivers/scsi/qedi/qedi_main.c 	initiator = &block->initiator;
block            1990 drivers/scsi/qedi/qedi_main.c 	ipv6_en = block->generic.ctrl_flags &
block            1992 drivers/scsi/qedi/qedi_main.c 	dhcp_en = block->generic.ctrl_flags &
block            2075 drivers/scsi/qedi/qedi_main.c 	struct nvm_iscsi_block *block;
block            2077 drivers/scsi/qedi/qedi_main.c 	block = qedi_get_nvram_block(qedi);
block            2078 drivers/scsi/qedi/qedi_main.c 	if (!block)
block            2081 drivers/scsi/qedi/qedi_main.c 	initiator = &block->initiator;
block            2116 drivers/scsi/qedi/qedi_main.c 	struct nvm_iscsi_block *block;
block            2120 drivers/scsi/qedi/qedi_main.c 	block = qedi_get_nvram_block(qedi);
block            2121 drivers/scsi/qedi/qedi_main.c 	if (!block)
block            2126 drivers/scsi/qedi/qedi_main.c 		  GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx);
block            2128 drivers/scsi/qedi/qedi_main.c 	ctrl_flags = block->target[idx].ctrl_flags &
block            2137 drivers/scsi/qedi/qedi_main.c 	ipv6_en = block->generic.ctrl_flags &
block            2140 drivers/scsi/qedi/qedi_main.c 	chap_en = block->generic.ctrl_flags &
block            2142 drivers/scsi/qedi/qedi_main.c 	chap_name = chap_en ? block->initiator.chap_name.byte : NULL;
block            2143 drivers/scsi/qedi/qedi_main.c 	chap_secret = chap_en ? block->initiator.chap_password.byte : NULL;
block            2145 drivers/scsi/qedi/qedi_main.c 	mchap_en = block->generic.ctrl_flags &
block            2147 drivers/scsi/qedi/qedi_main.c 	mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL;
block            2148 drivers/scsi/qedi/qedi_main.c 	mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL;
block            2153 drivers/scsi/qedi/qedi_main.c 			     block->target[idx].target_name.byte);
block            2158 drivers/scsi/qedi/qedi_main.c 				      block->target[idx].ipv6_addr.byte);
block            2161 drivers/scsi/qedi/qedi_main.c 				      block->target[idx].ipv4_addr.byte);
block            2165 drivers/scsi/qedi/qedi_main.c 			      GET_FIELD2(block->target[idx].generic_cont0,
block            2170 drivers/scsi/qedi/qedi_main.c 			      block->target[idx].lun.value[1],
block            2171 drivers/scsi/qedi/qedi_main.c 			      block->target[idx].lun.value[0]);
block             204 drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h 	struct nvm_iscsi_block	block[NUM_OF_ISCSI_PF_SUPPORTED]; /* 0x4 */
block            2483 drivers/scsi/scsi_debug.c 	u64 block, rest = 0;
block            2499 drivers/scsi/scsi_debug.c 	block = do_div(lba, sdebug_store_sectors);
block            2500 drivers/scsi/scsi_debug.c 	if (block + num > sdebug_store_sectors)
block            2501 drivers/scsi/scsi_debug.c 		rest = block + num - sdebug_store_sectors;
block            2504 drivers/scsi/scsi_debug.c 		   fake_storep + (block * sdebug_sector_size),
block            2525 drivers/scsi/scsi_debug.c 	u64 block, rest = 0;
block            2529 drivers/scsi/scsi_debug.c 	block = do_div(lba, store_blks);
block            2530 drivers/scsi/scsi_debug.c 	if (block + num > store_blks)
block            2531 drivers/scsi/scsi_debug.c 		rest = block + num - store_blks;
block            2533 drivers/scsi/scsi_debug.c 	res = !memcmp(fake_storep + (block * lb_size), arr,
block            2543 drivers/scsi/scsi_debug.c 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
block            3251 drivers/scsi/scsi_debug.c 	u64 block, lbaa;
block            3265 drivers/scsi/scsi_debug.c 	block = do_div(lbaa, sdebug_store_sectors);
block            3267 drivers/scsi/scsi_debug.c 	fs1p = fake_storep + (block * lb_size);
block            3285 drivers/scsi/scsi_debug.c 		block = do_div(lbaa, sdebug_store_sectors);
block            3286 drivers/scsi/scsi_debug.c 		memmove(fake_storep + (block * lb_size), fs1p, lb_size);
block            4196 drivers/scsi/scsi_debug.c static void block_unblock_all_queues(bool block)
block            4202 drivers/scsi/scsi_debug.c 		atomic_set(&sqp->blocked, (int)block);
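
In the scsi_debug.c excerpts the LBA is reduced modulo the size of the simulated store (do_div returns the remainder), and whatever part of the request runs past the end wraps around to the start via the rest counter. A small sketch of that wrap-around arithmetic against an ordinary buffer standing in for fake_storep, with sizes chosen only for the example:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define STORE_SECTORS 16u
    #define SECTOR_SIZE   8u

    static unsigned char fake_store[STORE_SECTORS * SECTOR_SIZE];

    /* Copy `num` sectors starting at `lba` into the store, wrapping at the end. */
    static void fake_write(uint64_t lba, uint32_t num, const unsigned char *src)
    {
        uint64_t block = lba % STORE_SECTORS;   /* the kernel code uses do_div() */
        uint32_t rest = 0;

        if (block + num > STORE_SECTORS)
            rest = (uint32_t)(block + num - STORE_SECTORS);

        memcpy(fake_store + block * SECTOR_SIZE, src,
               (num - rest) * SECTOR_SIZE);
        if (rest)                               /* wrapped tail goes back to sector 0 */
            memcpy(fake_store, src + (num - rest) * SECTOR_SIZE,
                   rest * SECTOR_SIZE);
    }

    int main(void)
    {
        unsigned char data[4 * SECTOR_SIZE];

        memset(data, 0xAB, sizeof(data));
        fake_write(14, 4, data);                /* sectors 14,15 then wraps to 0,1 */
        printf("store[0]=0x%02x store[14*8]=0x%02x\n",
               fake_store[0], fake_store[14 * SECTOR_SIZE]);
        return 0;
    }
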
block             221 drivers/scsi/sd_zbc.c 	sector_t block = sectors_to_logical(sdkp->device, sector);
block             241 drivers/scsi/sd_zbc.c 		put_unaligned_be64(block, &cmd->cmnd[2]);
block             343 drivers/scsi/sd_zbc.c 	sector_t max_lba, block = 0;
block             408 drivers/scsi/sd_zbc.c 				   (block + this_zone_blocks < sdkp->capacity
block             413 drivers/scsi/sd_zbc.c 			block += this_zone_blocks;
block             417 drivers/scsi/sd_zbc.c 		if (block < sdkp->capacity) {
block             418 drivers/scsi/sd_zbc.c 			ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, block,
block             424 drivers/scsi/sd_zbc.c 	} while (block < sdkp->capacity);
block             390 drivers/scsi/sr.c 	int block = 0, this_count, s_size;
block             405 drivers/scsi/sr.c 		"Doing sr request, block = %d\n", block));
block             491 drivers/scsi/sr.c 	block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
block             498 drivers/scsi/sr.c 	SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
block             499 drivers/scsi/sr.c 	SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
block             500 drivers/scsi/sr.c 	SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
block             501 drivers/scsi/sr.c 	SCpnt->cmnd[5] = (unsigned char) block & 0xff;
block            3104 drivers/scsi/st.c static int get_location(struct scsi_tape *STp, unsigned int *block, int *partition,
block            3132 drivers/scsi/st.c 		*block = *partition = 0;
block            3138 drivers/scsi/st.c 			*block = ((STp->buffer)->b_data[0] << 16)
block            3143 drivers/scsi/st.c 			*block = ((STp->buffer)->b_data[4] << 24)
block            3153 drivers/scsi/st.c 			    *block, *partition);
block            3164 drivers/scsi/st.c static int set_location(struct scsi_tape *STp, unsigned int block, int partition,
block            3180 drivers/scsi/st.c 		    block, partition);
block            3203 drivers/scsi/st.c 		scmd[2] = (block >> 16);
block            3204 drivers/scsi/st.c 		scmd[3] = (block >> 8);
block            3205 drivers/scsi/st.c 		scmd[4] = block;
block            3209 drivers/scsi/st.c 		scmd[3] = (block >> 24);
block            3210 drivers/scsi/st.c 		scmd[4] = (block >> 16);
block            3211 drivers/scsi/st.c 		scmd[5] = (block >> 8);
block            3212 drivers/scsi/st.c 		scmd[6] = block;
block            3246 drivers/scsi/st.c 			    STps->last_block_visited != block) {
block            3252 drivers/scsi/st.c 		if (block == 0)
block            3269 drivers/scsi/st.c 	unsigned int block;
block            3271 drivers/scsi/st.c 	if ((i = get_location(STp, &block, &partition, 1)) < 0)
block            1143 drivers/soc/ti/knav_qmss_queue.c 				       struct knav_link_ram_block *block)
block            1168 drivers/soc/ti/knav_qmss_queue.c 			block->dma = (dma_addr_t)temp[0];
block            1169 drivers/soc/ti/knav_qmss_queue.c 			block->virt = NULL;
block            1170 drivers/soc/ti/knav_qmss_queue.c 			block->size = temp[1];
block            1172 drivers/soc/ti/knav_qmss_queue.c 			block->size = temp[1];
block            1174 drivers/soc/ti/knav_qmss_queue.c 			block->virt = dmam_alloc_coherent(kdev->dev,
block            1175 drivers/soc/ti/knav_qmss_queue.c 						  8 * block->size, &block->dma,
block            1177 drivers/soc/ti/knav_qmss_queue.c 			if (!block->virt) {
block            1190 drivers/soc/ti/knav_qmss_queue.c 	struct knav_link_ram_block *block;
block            1194 drivers/soc/ti/knav_qmss_queue.c 		block = &kdev->link_rams[0];
block            1196 drivers/soc/ti/knav_qmss_queue.c 			&block->dma, block->virt, block->size);
block            1197 drivers/soc/ti/knav_qmss_queue.c 		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
block            1199 drivers/soc/ti/knav_qmss_queue.c 			writel_relaxed(block->size,
block            1202 drivers/soc/ti/knav_qmss_queue.c 			writel_relaxed(block->size - 1,
block            1204 drivers/soc/ti/knav_qmss_queue.c 		block++;
block            1205 drivers/soc/ti/knav_qmss_queue.c 		if (!block->size)
block            1209 drivers/soc/ti/knav_qmss_queue.c 			&block->dma, block->virt, block->size);
block            1210 drivers/soc/ti/knav_qmss_queue.c 		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
block             366 drivers/staging/comedi/drivers/jr3_pci.c 	struct jr3_block __iomem *block = dev->mmio;
block             399 drivers/staging/comedi/drivers/jr3_pci.c 				lo = &block[subdev].program_lo[addr];
block             400 drivers/staging/comedi/drivers/jr3_pci.c 				hi = &block[subdev].program_hi[addr];
block             624 drivers/staging/comedi/drivers/jr3_pci.c 	struct jr3_block __iomem *block = dev->mmio;
block             633 drivers/staging/comedi/drivers/jr3_pci.c 	spriv->sensor = &block[s->index].sensor;
block             659 drivers/staging/comedi/drivers/jr3_pci.c 	struct jr3_block __iomem *block = dev->mmio;
block             660 drivers/staging/comedi/drivers/jr3_pci.c 	struct jr3_sensor __iomem *sensor0 = &block[0].sensor;
block             677 drivers/staging/comedi/drivers/jr3_pci.c 	struct jr3_block __iomem *block;
block             699 drivers/staging/comedi/drivers/jr3_pci.c 	if (pci_resource_len(pcidev, 0) < board->n_subdevs * sizeof(*block))
block             706 drivers/staging/comedi/drivers/jr3_pci.c 	block = dev->mmio;
block             731 drivers/staging/comedi/drivers/jr3_pci.c 		writel(0, &block[i].reset);
block            3253 drivers/staging/exfat/exfat_super.c static sector_t _exfat_bmap(struct address_space *mapping, sector_t block)
block            3259 drivers/staging/exfat/exfat_super.c 	blocknr = generic_block_bmap(mapping, block, exfat_get_block);
block             256 drivers/staging/kpc2000/kpc2000_i2c.c 		len = data->block[0];
block             259 drivers/staging/kpc2000/kpc2000_i2c.c 			outb_p(data->block[i + 1], SMBBLKDAT(priv));
block             271 drivers/staging/kpc2000/kpc2000_i2c.c 		data->block[0] = len;
block             273 drivers/staging/kpc2000/kpc2000_i2c.c 			data->block[i + 1] = inb_p(SMBBLKDAT(priv));
block             290 drivers/staging/kpc2000/kpc2000_i2c.c 	len = data->block[0];
block             294 drivers/staging/kpc2000/kpc2000_i2c.c 		outb_p(data->block[1], SMBBLKDAT(priv));
block             333 drivers/staging/kpc2000/kpc2000_i2c.c 			data->block[0] = len;
block             338 drivers/staging/kpc2000/kpc2000_i2c.c 			data->block[i] = inb_p(SMBBLKDAT(priv));
block             340 drivers/staging/kpc2000/kpc2000_i2c.c 			outb_p(data->block[i + 1], SMBBLKDAT(priv));
block             375 drivers/staging/kpc2000/kpc2000_i2c.c 		if (data->block[0] < 1)
block             376 drivers/staging/kpc2000/kpc2000_i2c.c 			data->block[0] = 1;
block             377 drivers/staging/kpc2000/kpc2000_i2c.c 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
block             378 drivers/staging/kpc2000/kpc2000_i2c.c 			data->block[0] = I2C_SMBUS_BLOCK_MAX;
block             380 drivers/staging/kpc2000/kpc2000_i2c.c 		data->block[0] = 32;	/* max for SMBus block reads */
block             405 drivers/staging/kpc2000/kpc2000_i2c.c 	int block = 0;
block             447 drivers/staging/kpc2000/kpc2000_i2c.c 		block = 1;
block             463 drivers/staging/kpc2000/kpc2000_i2c.c 		block = 1;
block             478 drivers/staging/kpc2000/kpc2000_i2c.c 	if (block) {
block             491 drivers/staging/kpc2000/kpc2000_i2c.c 	if (hwpec || block) {
block             495 drivers/staging/kpc2000/kpc2000_i2c.c 	if (block) {
block            1652 drivers/staging/media/ipu3/ipu3-abi.h 	struct imgu_abi_binary_block_info block;
block             211 drivers/staging/media/ipu3/ipu3-css-fw.c 		if (bi->info.isp.sp.block.block_width <= 0 ||
block             212 drivers/staging/media/ipu3/ipu3-css-fw.c 		    bi->info.isp.sp.block.block_width > BLOCK_MAX ||
block             213 drivers/staging/media/ipu3/ipu3-css-fw.c 		    bi->info.isp.sp.block.output_block_height <= 0 ||
block             214 drivers/staging/media/ipu3/ipu3-css-fw.c 		    bi->info.isp.sp.block.output_block_height > BLOCK_MAX)
block            1195 drivers/staging/media/ipu3/ipu3-css-params.c 				bi->info.isp.sp.block.block_width *
block            1305 drivers/staging/media/ipu3/ipu3-css.c 				bi->info.isp.sp.block.block_width *
block            1309 drivers/staging/media/ipu3/ipu3-css.c 				bi->info.isp.sp.block.output_block_height);
block              84 drivers/staging/rtl8723bs/include/ioctl_cfg80211.h 	bool block;
block            2066 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	if (adapter_wdev_data(padapter)->block == true)
block              80 drivers/staging/vt6656/usbpipe.c 			   u16 block, u8 reg, u16 length, u8 *data)
block              84 drivers/staging/vt6656/usbpipe.c 	for (i = 0; i < length; i += block) {
block              85 drivers/staging/vt6656/usbpipe.c 		u16 len = min_t(int, length - i, block);
block              32 drivers/staging/vt6656/usbpipe.h 			   u16 block, u8 reg, u16 len, u8 *data);
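
The vt6656 usbpipe.c helper walks a buffer in chunks of at most block bytes, clamping the final chunk with min_t(). A trivial portable rendering of the same loop; the xfer callback here is a stand-in for the driver's USB register write, not its real API:

    #include <stddef.h>
    #include <stdio.h>

    /* Process `length` bytes of `data` in chunks of at most `block` bytes. */
    static void write_in_blocks(size_t block, const unsigned char *data,
                                size_t length,
                                void (*xfer)(const unsigned char *, size_t))
    {
        for (size_t i = 0; i < length; i += block) {
            size_t len = length - i < block ? length - i : block;

            xfer(data + i, len);
        }
    }

    static void print_chunk(const unsigned char *p, size_t len)
    {
        printf("chunk of %zu bytes starting with 0x%02x\n", len, p[0]);
    }

    int main(void)
    {
        unsigned char buf[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };

        write_in_blocks(4, buf, sizeof(buf), print_chunk); /* 4 + 4 + 2 bytes */
        return 0;
    }
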
block            2639 drivers/target/target_core_user.c 	u32 start, end, block, total_freed = 0;
block            2663 drivers/target/target_core_user.c 		block = find_last_bit(udev->data_bitmap, end);
block            2664 drivers/target/target_core_user.c 		if (block == udev->dbi_max) {
block            2671 drivers/target/target_core_user.c 		} else if (block == end) {
block            2676 drivers/target/target_core_user.c 			udev->dbi_thresh = start = block + 1;
block            2677 drivers/target/target_core_user.c 			udev->dbi_max = block;
block              38 drivers/thunderbolt/property.c static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
block              95 drivers/thunderbolt/property.c static struct tb_property *tb_property_parse(const u32 *block, size_t block_len,
block             116 drivers/thunderbolt/property.c 		dir = __tb_property_parse_dir(block, block_len, entry->value,
block             132 drivers/thunderbolt/property.c 		parse_dwdata(property->value.data, block + entry->value,
block             143 drivers/thunderbolt/property.c 		parse_dwdata(property->value.text, block + entry->value,
block             161 drivers/thunderbolt/property.c static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
block             177 drivers/thunderbolt/property.c 		dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid),
block             187 drivers/thunderbolt/property.c 	entries = (const struct tb_property_entry *)&block[content_offset];
block             195 drivers/thunderbolt/property.c 		property = tb_property_parse(block, block_len, &entries[i]);
block             220 drivers/thunderbolt/property.c struct tb_property_dir *tb_property_parse_dir(const u32 *block,
block             224 drivers/thunderbolt/property.c 		(const struct tb_property_rootdir_entry *)block;
block             231 drivers/thunderbolt/property.c 	return __tb_property_parse_dir(block, block_len, 0, rootdir->length,
block             349 drivers/thunderbolt/property.c 	u32 *block, unsigned int start_offset, size_t block_len)
block             415 drivers/thunderbolt/property.c 		pe = (struct tb_property_dir_entry *)&block[start_offset];
block             421 drivers/thunderbolt/property.c 		re = (struct tb_property_rootdir_entry *)&block[start_offset];
block             436 drivers/thunderbolt/property.c 			ret = __tb_property_format_dir(child, block, dir_end,
block             447 drivers/thunderbolt/property.c 			format_dwdata(&block[data_offset], property->value.data,
block             455 drivers/thunderbolt/property.c 			format_dwdata(&block[data_offset], property->value.text,
block             488 drivers/thunderbolt/property.c ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
block             493 drivers/thunderbolt/property.c 	if (!block) {
block             500 drivers/thunderbolt/property.c 	ret = __tb_property_format_dir(dir, block, 0, block_len);
block             286 drivers/thunderbolt/xdomain.c 	u32 **block, u32 *generation)
block             362 drivers/thunderbolt/xdomain.c 	*block = data;
block             950 drivers/thunderbolt/xdomain.c 	u32 *block = NULL;
block             956 drivers/thunderbolt/xdomain.c 					&block, &gen);
block             986 drivers/thunderbolt/xdomain.c 	dir = tb_property_parse_dir(block, ret);
block            1011 drivers/thunderbolt/xdomain.c 	kfree(block);
block            1033 drivers/thunderbolt/xdomain.c 	kfree(block);
block            1569 drivers/thunderbolt/xdomain.c 	u32 *block, len;
block            1578 drivers/thunderbolt/xdomain.c 	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
block            1579 drivers/thunderbolt/xdomain.c 	if (!block)
block            1582 drivers/thunderbolt/xdomain.c 	ret = tb_property_format_dir(xdomain_property_dir, block, len);
block            1584 drivers/thunderbolt/xdomain.c 		kfree(block);
block            1589 drivers/thunderbolt/xdomain.c 	xdomain_property_block = block;
block             801 drivers/tty/mips_ejtag_fdc.c 	int count, block;
block             822 drivers/tty/mips_ejtag_fdc.c 	for (count = total; count; count -= block) {
block             823 drivers/tty/mips_ejtag_fdc.c 		block = min(count, (int)(priv->xmit_size - dport->xmit_head));
block             824 drivers/tty/mips_ejtag_fdc.c 		memcpy(dport->port.xmit_buf + dport->xmit_head, buf, block);
block             825 drivers/tty/mips_ejtag_fdc.c 		dport->xmit_head += block;
block             828 drivers/tty/mips_ejtag_fdc.c 		buf += block;
block             164 drivers/tty/n_r3964.c static void dump_block(const unsigned char *block, unsigned int length)
block             171 drivers/tty/n_r3964.c 			sprintf(linebuf + 3 * j, "%02x ", block[i + j]);
block             819 drivers/tty/n_r3964.c 	struct r3964_block_header *block;
block             830 drivers/tty/n_r3964.c 	block = pClient->next_block_to_read;
block             831 drivers/tty/n_r3964.c 	if (!block) {
block             834 drivers/tty/n_r3964.c 		if (copy_to_user(buf, block->data, block->length))
block             838 drivers/tty/n_r3964.c 		return block->length;
block             865 drivers/tty/n_r3964.c 		pMsg->block = pBlock;
block             917 drivers/tty/n_r3964.c 		if (pMsg->block) {
block             919 drivers/tty/n_r3964.c 			pClient->next_block_to_read = pMsg->block;
block             929 drivers/tty/n_r3964.c 	struct r3964_block_header *block;
block             933 drivers/tty/n_r3964.c 	block = pClient->next_block_to_read;
block             934 drivers/tty/n_r3964.c 	if (block) {
block             935 drivers/tty/n_r3964.c 		block->locks--;
block             936 drivers/tty/n_r3964.c 		if (block->locks == 0) {
block             937 drivers/tty/n_r3964.c 			remove_from_rx_queue(pInfo, block);
block             657 drivers/usb/gadget/udc/lpc32xx_udc.c 				  int block)
block             663 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (block)
block             408 drivers/usb/serial/f81534.c 	size_t block = 0;
block             462 drivers/usb/serial/f81534.c 			offset = count + block * F81534_MAX_DATA_BLOCK;
block             467 drivers/usb/serial/f81534.c 		++block;
block             552 drivers/usb/storage/datafab.c 	unsigned long block, blocks;
block             602 drivers/usb/storage/datafab.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block             608 drivers/usb/storage/datafab.c 			     block, blocks);
block             609 drivers/usb/storage/datafab.c 		return datafab_read_data(us, info, block, blocks);
block             615 drivers/usb/storage/datafab.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block             622 drivers/usb/storage/datafab.c 			     block, blocks);
block             623 drivers/usb/storage/datafab.c 		return datafab_read_data(us, info, block, blocks);
block             627 drivers/usb/storage/datafab.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block             633 drivers/usb/storage/datafab.c 			     block, blocks);
block             634 drivers/usb/storage/datafab.c 		return datafab_write_data(us, info, block, blocks);
block             640 drivers/usb/storage/datafab.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block             647 drivers/usb/storage/datafab.c 			     block, blocks);
block             648 drivers/usb/storage/datafab.c 		return datafab_write_data(us, info, block, blocks);
block             478 drivers/usb/storage/jumpshot.c 	unsigned long block, blocks;
block             530 drivers/usb/storage/jumpshot.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block             536 drivers/usb/storage/jumpshot.c 			     block, blocks);
block             537 drivers/usb/storage/jumpshot.c 		return jumpshot_read_data(us, info, block, blocks);
block             543 drivers/usb/storage/jumpshot.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block             550 drivers/usb/storage/jumpshot.c 			     block, blocks);
block             551 drivers/usb/storage/jumpshot.c 		return jumpshot_read_data(us, info, block, blocks);
block             555 drivers/usb/storage/jumpshot.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block             561 drivers/usb/storage/jumpshot.c 			     block, blocks);
block             562 drivers/usb/storage/jumpshot.c 		return jumpshot_write_data(us, info, block, blocks);
block             568 drivers/usb/storage/jumpshot.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block             575 drivers/usb/storage/jumpshot.c 			     block, blocks);
block             576 drivers/usb/storage/jumpshot.c 		return jumpshot_write_data(us, info, block, blocks);
block            1684 drivers/usb/storage/shuttle_usbat.c 	unsigned long block, blocks;
block            1729 drivers/usb/storage/shuttle_usbat.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block            1735 drivers/usb/storage/shuttle_usbat.c 			     block, blocks);
block            1736 drivers/usb/storage/shuttle_usbat.c 		return usbat_flash_read_data(us, info, block, blocks);
block            1743 drivers/usb/storage/shuttle_usbat.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block            1750 drivers/usb/storage/shuttle_usbat.c 			     block, blocks);
block            1751 drivers/usb/storage/shuttle_usbat.c 		return usbat_flash_read_data(us, info, block, blocks);
block            1755 drivers/usb/storage/shuttle_usbat.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block            1761 drivers/usb/storage/shuttle_usbat.c 			     block, blocks);
block            1762 drivers/usb/storage/shuttle_usbat.c 		return usbat_flash_write_data(us, info, block, blocks);
block            1769 drivers/usb/storage/shuttle_usbat.c 		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
block            1776 drivers/usb/storage/shuttle_usbat.c 			     block, blocks);
block            1777 drivers/usb/storage/shuttle_usbat.c 		return usbat_flash_write_data(us, info, block, blocks);
block              96 drivers/video/fbdev/core/fbmon.c static int edid_is_serial_block(unsigned char *block)
block              98 drivers/video/fbdev/core/fbmon.c 	if ((block[0] == 0x00) && (block[1] == 0x00) &&
block              99 drivers/video/fbdev/core/fbmon.c 	    (block[2] == 0x00) && (block[3] == 0xff) &&
block             100 drivers/video/fbdev/core/fbmon.c 	    (block[4] == 0x00))
block             106 drivers/video/fbdev/core/fbmon.c static int edid_is_ascii_block(unsigned char *block)
block             108 drivers/video/fbdev/core/fbmon.c 	if ((block[0] == 0x00) && (block[1] == 0x00) &&
block             109 drivers/video/fbdev/core/fbmon.c 	    (block[2] == 0x00) && (block[3] == 0xfe) &&
block             110 drivers/video/fbdev/core/fbmon.c 	    (block[4] == 0x00))
block             116 drivers/video/fbdev/core/fbmon.c static int edid_is_limits_block(unsigned char *block)
block             118 drivers/video/fbdev/core/fbmon.c 	if ((block[0] == 0x00) && (block[1] == 0x00) &&
block             119 drivers/video/fbdev/core/fbmon.c 	    (block[2] == 0x00) && (block[3] == 0xfd) &&
block             120 drivers/video/fbdev/core/fbmon.c 	    (block[4] == 0x00))
block             126 drivers/video/fbdev/core/fbmon.c static int edid_is_monitor_block(unsigned char *block)
block             128 drivers/video/fbdev/core/fbmon.c 	if ((block[0] == 0x00) && (block[1] == 0x00) &&
block             129 drivers/video/fbdev/core/fbmon.c 	    (block[2] == 0x00) && (block[3] == 0xfc) &&
block             130 drivers/video/fbdev/core/fbmon.c 	    (block[4] == 0x00))
block             136 drivers/video/fbdev/core/fbmon.c static int edid_is_timing_block(unsigned char *block)
block             138 drivers/video/fbdev/core/fbmon.c 	if ((block[0] != 0x00) || (block[1] != 0x00) ||
block             139 drivers/video/fbdev/core/fbmon.c 	    (block[2] != 0x00) || (block[4] != 0x00))
block             147 drivers/video/fbdev/core/fbmon.c 	unsigned char *block = edid + ID_MANUFACTURER_NAME, manufacturer[4];
block             152 drivers/video/fbdev/core/fbmon.c 	manufacturer[0] = ((block[0] & 0x7c) >> 2) + '@';
block             153 drivers/video/fbdev/core/fbmon.c 	manufacturer[1] = ((block[0] & 0x03) << 3) +
block             154 drivers/video/fbdev/core/fbmon.c 		((block[1] & 0xe0) >> 5) + '@';
block             155 drivers/video/fbdev/core/fbmon.c 	manufacturer[2] = (block[1] & 0x1f) + '@';
block             157 drivers/video/fbdev/core/fbmon.c 	model = block[2] + (block[3] << 8);
block             292 drivers/video/fbdev/core/fbmon.c static void parse_vendor_block(unsigned char *block, struct fb_monspecs *specs)
block             294 drivers/video/fbdev/core/fbmon.c 	specs->manufacturer[0] = ((block[0] & 0x7c) >> 2) + '@';
block             295 drivers/video/fbdev/core/fbmon.c 	specs->manufacturer[1] = ((block[0] & 0x03) << 3) +
block             296 drivers/video/fbdev/core/fbmon.c 		((block[1] & 0xe0) >> 5) + '@';
block             297 drivers/video/fbdev/core/fbmon.c 	specs->manufacturer[2] = (block[1] & 0x1f) + '@';
block             299 drivers/video/fbdev/core/fbmon.c 	specs->model = block[2] + (block[3] << 8);
block             300 drivers/video/fbdev/core/fbmon.c 	specs->serial = block[4] + (block[5] << 8) +
block             301 drivers/video/fbdev/core/fbmon.c 	       (block[6] << 16) + (block[7] << 24);
block             302 drivers/video/fbdev/core/fbmon.c 	specs->year = block[9] + 1990;
block             303 drivers/video/fbdev/core/fbmon.c 	specs->week = block[8];
block             326 drivers/video/fbdev/core/fbmon.c static void get_chroma(unsigned char *block, struct fb_monspecs *specs)
block             332 drivers/video/fbdev/core/fbmon.c 	tmp = ((block[5] & (3 << 6)) >> 6) | (block[0x7] << 2);
block             338 drivers/video/fbdev/core/fbmon.c 	tmp = ((block[5] & (3 << 4)) >> 4) | (block[0x8] << 2);
block             344 drivers/video/fbdev/core/fbmon.c 	tmp = ((block[5] & (3 << 2)) >> 2) | (block[0x9] << 2);
block             350 drivers/video/fbdev/core/fbmon.c 	tmp = (block[5] & 3) | (block[0xa] << 2);
block             356 drivers/video/fbdev/core/fbmon.c 	tmp = ((block[6] & (3 << 6)) >> 6) | (block[0xb] << 2);
block             362 drivers/video/fbdev/core/fbmon.c 	tmp = ((block[6] & (3 << 4)) >> 4) | (block[0xc] << 2);
block             368 drivers/video/fbdev/core/fbmon.c 	tmp = ((block[6] & (3 << 2)) >> 2) | (block[0xd] << 2);
block             374 drivers/video/fbdev/core/fbmon.c 	tmp = (block[6] & 3) | (block[0xe] << 2);
block             409 drivers/video/fbdev/core/fbmon.c static int get_est_timing(unsigned char *block, struct fb_videomode *mode)
block             414 drivers/video/fbdev/core/fbmon.c 	c = block[0];
block             451 drivers/video/fbdev/core/fbmon.c 	c = block[1];
block             485 drivers/video/fbdev/core/fbmon.c 	c = block[2];
block             494 drivers/video/fbdev/core/fbmon.c static int get_std_timing(unsigned char *block, struct fb_videomode *mode,
block             500 drivers/video/fbdev/core/fbmon.c 		u32 std_2byte_code = block[0] << 8 | block[1];
block             514 drivers/video/fbdev/core/fbmon.c 		xres = (block[0] + 31) * 8;
block             518 drivers/video/fbdev/core/fbmon.c 		ratio = (block[1] & 0xc0) >> 6;
block             537 drivers/video/fbdev/core/fbmon.c 		refresh = (block[1] & 0x3f) + 60;
block             553 drivers/video/fbdev/core/fbmon.c static int get_dst_timing(unsigned char *block, struct fb_videomode *mode,
block             558 drivers/video/fbdev/core/fbmon.c 	for (j = 0; j < 6; j++, block += STD_TIMING_DESCRIPTION_SIZE)
block             559 drivers/video/fbdev/core/fbmon.c 		num += get_std_timing(block, &mode[num], ver, rev, specs);
block             564 drivers/video/fbdev/core/fbmon.c static void get_detailed_timing(unsigned char *block,
block             619 drivers/video/fbdev/core/fbmon.c 	unsigned char *block;
block             639 drivers/video/fbdev/core/fbmon.c 	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
block             640 drivers/video/fbdev/core/fbmon.c 	for (i = 0; i < 4; i++, block+= DETAILED_TIMING_DESCRIPTION_SIZE) {
block             641 drivers/video/fbdev/core/fbmon.c 		if (!(block[0] == 0x00 && block[1] == 0x00)) {
block             642 drivers/video/fbdev/core/fbmon.c 			get_detailed_timing(block, &mode[num]);
block             652 drivers/video/fbdev/core/fbmon.c 	block = edid + ESTABLISHED_TIMING_1;
block             653 drivers/video/fbdev/core/fbmon.c 	num += get_est_timing(block, &mode[num]);
block             656 drivers/video/fbdev/core/fbmon.c 	block = edid + STD_TIMING_DESCRIPTIONS_START;
block             657 drivers/video/fbdev/core/fbmon.c 	for (i = 0; i < STD_TIMING; i++, block += STD_TIMING_DESCRIPTION_SIZE)
block             658 drivers/video/fbdev/core/fbmon.c 		num += get_std_timing(block, &mode[num], ver, rev, specs);
block             660 drivers/video/fbdev/core/fbmon.c 	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
block             661 drivers/video/fbdev/core/fbmon.c 	for (i = 0; i < 4; i++, block+= DETAILED_TIMING_DESCRIPTION_SIZE) {
block             662 drivers/video/fbdev/core/fbmon.c 		if (block[0] == 0x00 && block[1] == 0x00 && block[3] == 0xfa)
block             663 drivers/video/fbdev/core/fbmon.c 			num += get_dst_timing(block + 5, &mode[num],
block             697 drivers/video/fbdev/core/fbmon.c 	unsigned char *block;
block             699 drivers/video/fbdev/core/fbmon.c 	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
block             703 drivers/video/fbdev/core/fbmon.c 	for (i = 0; i < 4; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) {
block             704 drivers/video/fbdev/core/fbmon.c 		if (edid_is_limits_block(block)) {
block             777 drivers/video/fbdev/core/fbmon.c 	unsigned char c, *block;
block             779 drivers/video/fbdev/core/fbmon.c 	block = edid + EDID_STRUCT_DISPLAY;
block             783 drivers/video/fbdev/core/fbmon.c 	c = block[0] & 0x80;
block             790 drivers/video/fbdev/core/fbmon.c 		switch ((block[0] & 0x60) >> 5) {
block             810 drivers/video/fbdev/core/fbmon.c 	c = block[0] & 0x10;
block             813 drivers/video/fbdev/core/fbmon.c 	c = block[0] & 0x0f;
block             836 drivers/video/fbdev/core/fbmon.c 	specs->max_x = block[1];
block             837 drivers/video/fbdev/core/fbmon.c 	specs->max_y = block[2];
block             849 drivers/video/fbdev/core/fbmon.c 	c = block[3];
block             854 drivers/video/fbdev/core/fbmon.c 	get_dpms_capabilities(block[4], specs);
block             856 drivers/video/fbdev/core/fbmon.c 	switch ((block[4] & 0x18) >> 3) {
block             875 drivers/video/fbdev/core/fbmon.c 	get_chroma(block, specs);
block             878 drivers/video/fbdev/core/fbmon.c 	c = block[4] & 0x7;
block             896 drivers/video/fbdev/core/fbmon.c 	unsigned char *block;
block             907 drivers/video/fbdev/core/fbmon.c 	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
block             909 drivers/video/fbdev/core/fbmon.c 	for (i = 0; i < 4; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) {
block             910 drivers/video/fbdev/core/fbmon.c 		if (edid_is_timing_block(block)) {
block             938 drivers/video/fbdev/core/fbmon.c 	unsigned char *block;
block             963 drivers/video/fbdev/core/fbmon.c 	block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
block             964 drivers/video/fbdev/core/fbmon.c 	for (i = 0; i < 4; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) {
block             965 drivers/video/fbdev/core/fbmon.c 		if (edid_is_serial_block(block)) {
block             966 drivers/video/fbdev/core/fbmon.c 			copy_string(block, specs->serial_no);
block             968 drivers/video/fbdev/core/fbmon.c 		} else if (edid_is_ascii_block(block)) {
block             969 drivers/video/fbdev/core/fbmon.c 			copy_string(block, specs->ascii);
block             971 drivers/video/fbdev/core/fbmon.c 		} else if (edid_is_monitor_block(block)) {
block             972 drivers/video/fbdev/core/fbmon.c 			copy_string(block, specs->monitor);
block              72 drivers/video/fbdev/edid.h #define PIXEL_CLOCK_LO     (unsigned)block[ 0 ]
block              73 drivers/video/fbdev/edid.h #define PIXEL_CLOCK_HI     (unsigned)block[ 1 ]
block              75 drivers/video/fbdev/edid.h #define H_ACTIVE_LO        (unsigned)block[ 2 ]
block              76 drivers/video/fbdev/edid.h #define H_BLANKING_LO      (unsigned)block[ 3 ]
block              77 drivers/video/fbdev/edid.h #define H_ACTIVE_HI        UPPER_NIBBLE( (unsigned)block[ 4 ] )
block              79 drivers/video/fbdev/edid.h #define H_BLANKING_HI      LOWER_NIBBLE( (unsigned)block[ 4 ] )
block              82 drivers/video/fbdev/edid.h #define V_ACTIVE_LO        (unsigned)block[ 5 ]
block              83 drivers/video/fbdev/edid.h #define V_BLANKING_LO      (unsigned)block[ 6 ]
block              84 drivers/video/fbdev/edid.h #define V_ACTIVE_HI        UPPER_NIBBLE( (unsigned)block[ 7 ] )
block              86 drivers/video/fbdev/edid.h #define V_BLANKING_HI      LOWER_NIBBLE( (unsigned)block[ 7 ] )
block              89 drivers/video/fbdev/edid.h #define H_SYNC_OFFSET_LO   (unsigned)block[ 8 ]
block              90 drivers/video/fbdev/edid.h #define H_SYNC_WIDTH_LO    (unsigned)block[ 9 ]
block              92 drivers/video/fbdev/edid.h #define V_SYNC_OFFSET_LO   UPPER_NIBBLE( (unsigned)block[ 10 ] )
block              93 drivers/video/fbdev/edid.h #define V_SYNC_WIDTH_LO    LOWER_NIBBLE( (unsigned)block[ 10 ] )
block              95 drivers/video/fbdev/edid.h #define V_SYNC_WIDTH_HI    ((unsigned)block[ 11 ] & (1|2))
block              96 drivers/video/fbdev/edid.h #define V_SYNC_OFFSET_HI   (((unsigned)block[ 11 ] & (4|8)) >> 2)
block              98 drivers/video/fbdev/edid.h #define H_SYNC_WIDTH_HI    (((unsigned)block[ 11 ] & (16|32)) >> 4)
block              99 drivers/video/fbdev/edid.h #define H_SYNC_OFFSET_HI   (((unsigned)block[ 11 ] & (64|128)) >> 6)
block             107 drivers/video/fbdev/edid.h #define H_SIZE_LO          (unsigned)block[ 12 ]
block             108 drivers/video/fbdev/edid.h #define V_SIZE_LO          (unsigned)block[ 13 ]
block             110 drivers/video/fbdev/edid.h #define H_SIZE_HI          UPPER_NIBBLE( (unsigned)block[ 14 ] )
block             111 drivers/video/fbdev/edid.h #define V_SIZE_HI          LOWER_NIBBLE( (unsigned)block[ 14 ] )
block             116 drivers/video/fbdev/edid.h #define H_BORDER           (unsigned)block[ 15 ]
block             117 drivers/video/fbdev/edid.h #define V_BORDER           (unsigned)block[ 16 ]
block             119 drivers/video/fbdev/edid.h #define FLAGS              (unsigned)block[ 17 ]
block             127 drivers/video/fbdev/edid.h #define V_MIN_RATE              block[ 5 ]
block             128 drivers/video/fbdev/edid.h #define V_MAX_RATE              block[ 6 ]
block             129 drivers/video/fbdev/edid.h #define H_MIN_RATE              block[ 7 ]
block             130 drivers/video/fbdev/edid.h #define H_MAX_RATE              block[ 8 ]
block             131 drivers/video/fbdev/edid.h #define MAX_PIXEL_CLOCK         (((int)block[ 9 ]) * 10)
block             132 drivers/video/fbdev/edid.h #define GTF_SUPPORT		block[10]
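
The edid.h macros split each detailed-timing field into a low byte plus an upper nibble packed into a shared byte (for example H_ACTIVE_HI is the upper nibble of block[4] and H_BLANKING_HI the lower one). A short sketch of how such a pair is reassembled; UPPER_NIBBLE/LOWER_NIBBLE are redefined locally for the sketch, and the sample bytes follow the usual EDID detailed-timing layout rather than any particular monitor:

    #include <stdio.h>

    #define UPPER_NIBBLE(x) (((x) >> 4) & 0x0F)
    #define LOWER_NIBBLE(x) ((x) & 0x0F)

    int main(void)
    {
        /* Bytes 0-4 of a detailed timing descriptor: pixel clock (unused
         * here), H_ACTIVE_LO, H_BLANKING_LO, then both upper nibbles. */
        unsigned char block[] = { 0x3A, 0x02, 0x80, 0x18, 0x71 };

        unsigned int h_active   = (UPPER_NIBBLE(block[4]) << 8) | block[2];
        unsigned int h_blanking = (LOWER_NIBBLE(block[4]) << 8) | block[3];

        printf("h_active=%u h_blanking=%u\n", h_active, h_blanking);
        return 0;
    }

With the sample bytes this prints h_active=1920 h_blanking=280.
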
block              62 drivers/w1/slaves/w1_ds2433.c 				int block)
block              65 drivers/w1/slaves/w1_ds2433.c 	int	off = block * W1_PAGE_SIZE;
block              67 drivers/w1/slaves/w1_ds2433.c 	if (data->validcrc & (1 << block))
block              83 drivers/w1/slaves/w1_ds2433.c 		data->validcrc |= (1 << block);
block              72 drivers/w1/slaves/w1_ds28e04.c 				int block)
block              75 drivers/w1/slaves/w1_ds28e04.c 	int	off = block * W1_PAGE_SIZE;
block              77 drivers/w1/slaves/w1_ds28e04.c 	if (data->validcrc & (1 << block))
block              93 drivers/w1/slaves/w1_ds28e04.c 		data->validcrc |= (1 << block);
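
The w1_ds2433/w1_ds28e04 excerpts cache EEPROM pages and remember which cached pages have already passed a CRC check with one bit per page in validcrc (off = block * W1_PAGE_SIZE, validcrc & (1 << block)). A minimal sketch of that validity-bitmap pattern, with a dummy reader in place of the 1-Wire transfer and sizes chosen only for the example:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SIZE_BYTES 32
    #define NUM_PAGES       4

    struct page_cache {
        uint8_t  data[NUM_PAGES * PAGE_SIZE_BYTES];
        uint32_t validcrc;                 /* bit n set: page n cached and verified */
    };

    /* Stand-in for the real device read; always "succeeds" here. */
    static int device_read_page(int block, uint8_t *dst)
    {
        memset(dst, 0x40 + block, PAGE_SIZE_BYTES);
        return 0;
    }

    static int refresh_block(struct page_cache *c, int block)
    {
        int off = block * PAGE_SIZE_BYTES;

        if (c->validcrc & (1u << block))   /* already cached and CRC-checked */
            return 0;

        if (device_read_page(block, c->data + off))
            return -1;

        c->validcrc |= 1u << block;        /* mark the page as valid */
        return 0;
    }

    int main(void)
    {
        struct page_cache c = { .validcrc = 0 };

        refresh_block(&c, 2);
        printf("validcrc=0x%x first byte of page 2 = 0x%02x\n",
               c.validcrc, c.data[2 * PAGE_SIZE_BYTES]);
        return 0;
    }
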
block              41 drivers/w1/w1_netlink.c 	struct w1_cb_block *block;
block              55 drivers/w1/w1_netlink.c static u16 w1_reply_len(struct w1_cb_block *block)
block              57 drivers/w1/w1_netlink.c 	if (!block->cn)
block              59 drivers/w1/w1_netlink.c 	return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len;
block              62 drivers/w1/w1_netlink.c static void w1_unref_block(struct w1_cb_block *block)
block              64 drivers/w1/w1_netlink.c 	if (atomic_sub_return(1, &block->refcnt) == 0) {
block              65 drivers/w1/w1_netlink.c 		u16 len = w1_reply_len(block);
block              67 drivers/w1/w1_netlink.c 			cn_netlink_send_mult(block->first_cn, len,
block              68 drivers/w1/w1_netlink.c 				block->portid, 0, GFP_KERNEL);
block              70 drivers/w1/w1_netlink.c 		kfree(block);
block              82 drivers/w1/w1_netlink.c static void w1_reply_make_space(struct w1_cb_block *block, u16 space)
block              84 drivers/w1/w1_netlink.c 	u16 len = w1_reply_len(block);
block              85 drivers/w1/w1_netlink.c 	if (len + space >= block->maxlen) {
block              86 drivers/w1/w1_netlink.c 		cn_netlink_send_mult(block->first_cn, len, block->portid, 0, GFP_KERNEL);
block              87 drivers/w1/w1_netlink.c 		block->first_cn->len = 0;
block              88 drivers/w1/w1_netlink.c 		block->cn = NULL;
block              89 drivers/w1/w1_netlink.c 		block->msg = NULL;
block              90 drivers/w1/w1_netlink.c 		block->cmd = NULL;
block              95 drivers/w1/w1_netlink.c static void w1_netlink_check_send(struct w1_cb_block *block)
block              97 drivers/w1/w1_netlink.c 	if (!(block->request_cn.flags & W1_CN_BUNDLE) && block->cn)
block              98 drivers/w1/w1_netlink.c 		w1_reply_make_space(block, block->maxlen);
block             110 drivers/w1/w1_netlink.c static void w1_netlink_setup_msg(struct w1_cb_block *block, u32 ack)
block             112 drivers/w1/w1_netlink.c 	if (block->cn && block->cn->ack == ack) {
block             113 drivers/w1/w1_netlink.c 		block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len);
block             116 drivers/w1/w1_netlink.c 		if (block->cn)
block             117 drivers/w1/w1_netlink.c 			block->cn = (struct cn_msg *)(block->cn->data +
block             118 drivers/w1/w1_netlink.c 				block->cn->len);
block             120 drivers/w1/w1_netlink.c 			block->cn = block->first_cn;
block             122 drivers/w1/w1_netlink.c 		memcpy(block->cn, &block->request_cn, sizeof(*block->cn));
block             123 drivers/w1/w1_netlink.c 		block->cn->len = 0;
block             124 drivers/w1/w1_netlink.c 		block->cn->ack = ack;
block             125 drivers/w1/w1_netlink.c 		block->msg = (struct w1_netlink_msg *)block->cn->data;
block             133 drivers/w1/w1_netlink.c static void w1_netlink_queue_cmd(struct w1_cb_block *block,
block             137 drivers/w1/w1_netlink.c 	w1_reply_make_space(block, sizeof(struct cn_msg) +
block             144 drivers/w1/w1_netlink.c 	w1_netlink_setup_msg(block, block->request_cn.seq + 1);
block             145 drivers/w1/w1_netlink.c 	memcpy(block->msg, block->cur_msg, sizeof(*block->msg));
block             146 drivers/w1/w1_netlink.c 	block->cn->len += sizeof(*block->msg);
block             147 drivers/w1/w1_netlink.c 	block->msg->len = 0;
block             148 drivers/w1/w1_netlink.c 	block->cmd = (struct w1_netlink_cmd *)(block->msg->data);
block             151 drivers/w1/w1_netlink.c 	if (block->cmd != cmd)
block             152 drivers/w1/w1_netlink.c 		memcpy(block->cmd, cmd, space);
block             153 drivers/w1/w1_netlink.c 	block->cn->len += space;
block             154 drivers/w1/w1_netlink.c 	block->msg->len += space;
block             160 drivers/w1/w1_netlink.c static void w1_netlink_queue_status(struct w1_cb_block *block,
block             165 drivers/w1/w1_netlink.c 	w1_reply_make_space(block, space);
block             166 drivers/w1/w1_netlink.c 	w1_netlink_setup_msg(block, block->request_cn.ack);
block             168 drivers/w1/w1_netlink.c 	memcpy(block->msg, req_msg, sizeof(*req_msg));
block             169 drivers/w1/w1_netlink.c 	block->cn->len += sizeof(*req_msg);
block             170 drivers/w1/w1_netlink.c 	block->msg->len = 0;
block             171 drivers/w1/w1_netlink.c 	block->msg->status = (u8)-error;
block             173 drivers/w1/w1_netlink.c 		struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)block->msg->data;
block             175 drivers/w1/w1_netlink.c 		block->cn->len += sizeof(*cmd);
block             176 drivers/w1/w1_netlink.c 		block->msg->len += sizeof(*cmd);
block             179 drivers/w1/w1_netlink.c 	w1_netlink_check_send(block);
block             236 drivers/w1/w1_netlink.c 	struct w1_cb_block *block = dev->priv;
block             237 drivers/w1/w1_netlink.c 	struct w1_netlink_cmd *cache_cmd = block->cmd;
block             240 drivers/w1/w1_netlink.c 	w1_reply_make_space(block, sizeof(*data));
block             243 drivers/w1/w1_netlink.c 	if (!block->cmd) {
block             245 drivers/w1/w1_netlink.c 		w1_netlink_queue_cmd(block, cache_cmd);
block             248 drivers/w1/w1_netlink.c 	data = (u64 *)(block->cmd->data + block->cmd->len);
block             251 drivers/w1/w1_netlink.c 	block->cn->len += sizeof(*data);
block             252 drivers/w1/w1_netlink.c 	block->msg->len += sizeof(*data);
block             253 drivers/w1/w1_netlink.c 	block->cmd->len += sizeof(*data);
block             453 drivers/w1/w1_netlink.c 	dev->priv = node->block;
block             456 drivers/w1/w1_netlink.c 	node->block->cur_msg = node->msg;
block             468 drivers/w1/w1_netlink.c 		w1_netlink_check_send(node->block);
block             470 drivers/w1/w1_netlink.c 		w1_netlink_queue_status(node->block, node->msg, cmd, err);
block             479 drivers/w1/w1_netlink.c 		w1_netlink_queue_status(node->block, node->msg, cmd, err);
block             495 drivers/w1/w1_netlink.c 	w1_unref_block(node->block);
block             543 drivers/w1/w1_netlink.c 	struct w1_cb_block *block = NULL;
block             603 drivers/w1/w1_netlink.c 		block = kzalloc(size, GFP_KERNEL);
block             604 drivers/w1/w1_netlink.c 		if (!block) {
block             612 drivers/w1/w1_netlink.c 		atomic_set(&block->refcnt, 1);
block             613 drivers/w1/w1_netlink.c 		block->portid = nsp->portid;
block             614 drivers/w1/w1_netlink.c 		memcpy(&block->request_cn, cn, sizeof(*cn) + cn->len);
block             615 drivers/w1/w1_netlink.c 		node = (struct w1_cb_node *)(block->request_cn.data + cn->len);
block             626 drivers/w1/w1_netlink.c 		block->maxlen = reply_size;
block             627 drivers/w1/w1_netlink.c 		block->first_cn = (struct cn_msg *)(node + node_count);
block             628 drivers/w1/w1_netlink.c 		memset(block->first_cn, 0, sizeof(*block->first_cn));
block             678 drivers/w1/w1_netlink.c 		atomic_inc(&block->refcnt);
block             680 drivers/w1/w1_netlink.c 		node->block = block;
block             681 drivers/w1/w1_netlink.c 		node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn +
block             709 drivers/w1/w1_netlink.c 	if (block)
block             710 drivers/w1/w1_netlink.c 		w1_unref_block(block);
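
The w1_netlink entries above revolve around a shared reply buffer that is flushed when it fills up (w1_reply_make_space) and sent one last time when its reference count drops to zero (w1_unref_block). A minimal user-space sketch of that pattern follows; the names reply_block, reply_append and reply_flush are invented, and C11 atomics stand in for the kernel's atomic_sub_return.

    /* Illustrative only: reply_block/reply_flush/reply_append are made-up names. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct reply_block {
        atomic_int refcnt;
        size_t len;        /* bytes queued so far */
        size_t maxlen;     /* flush threshold, like block->maxlen above */
        char buf[256];
    };

    static void reply_flush(struct reply_block *b)
    {
        if (b->len) {
            printf("sending %zu queued bytes\n", b->len); /* stand-in for cn_netlink_send_mult() */
            b->len = 0;
        }
    }

    /* Queue data, flushing first if it would not fit (cf. w1_reply_make_space). */
    static void reply_append(struct reply_block *b, const void *data, size_t n)
    {
        if (b->len + n >= b->maxlen)
            reply_flush(b);
        memcpy(b->buf + b->len, data, n);
        b->len += n;
    }

    /* Drop one reference; the last holder sends what is pending and frees. */
    static void reply_unref(struct reply_block *b)
    {
        if (atomic_fetch_sub(&b->refcnt, 1) == 1) { /* cf. atomic_sub_return(1, ...) == 0 */
            reply_flush(b);
            free(b);
        }
    }

    int main(void)
    {
        struct reply_block *b = calloc(1, sizeof(*b));
        atomic_init(&b->refcnt, 2);
        b->maxlen = sizeof(b->buf);
        reply_append(b, "hello", 5);
        reply_unref(b);   /* one reference still held, nothing sent yet */
        reply_unref(b);   /* last reference: flush and free */
        return 0;
    }
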
block             194 fs/adfs/adfs.h 				   unsigned int block)
block             200 fs/adfs/adfs.h 		block += off << ADFS_SB(sb)->s_log2sharesize;
block             203 fs/adfs/adfs.h 	return adfs_map_lookup(sb, indaddr >> 8, block);
block              16 fs/adfs/dir_fplus.c 	unsigned long block;
block              25 fs/adfs/dir_fplus.c 	block = __adfs_block_map(sb, id, 0);
block              26 fs/adfs/dir_fplus.c 	if (!block) {
block              31 fs/adfs/dir_fplus.c 	dir->bh_fplus[0] = sb_bread(sb, block);
block              70 fs/adfs/dir_fplus.c 		block = __adfs_block_map(sb, id, blk);
block              71 fs/adfs/dir_fplus.c 		if (!block) {
block              76 fs/adfs/dir_fplus.c 		dir->bh_fplus[blk] = sb_bread(sb, block);
block              79 fs/adfs/dir_fplus.c 				   id, blk, block);
block              16 fs/adfs/inode.c adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh,
block              20 fs/adfs/inode.c 		if (block >= inode->i_blocks)
block              23 fs/adfs/inode.c 		block = __adfs_block_map(inode->i_sb, inode->i_ino, block);
block              24 fs/adfs/inode.c 		if (block)
block              25 fs/adfs/inode.c 			map_bh(bh, inode->i_sb, block);
block              69 fs/adfs/inode.c static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
block              71 fs/adfs/inode.c 	return generic_block_bmap(mapping, block, adfs_get_block);
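
Several of the filesystems in this listing (adfs here, and affs, befs, bfs, efs, ext2 further down) implement bmap by handing a get_block callback to generic_block_bmap. A self-contained sketch of that contract is below; toy_inode and the contiguous layout are invented for the example, whereas the real callbacks consult on-disk maps.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_inode {
        uint64_t start_block;  /* file stored contiguously from this device block */
        uint64_t nblocks;
    };

    /* Fill *phys and return 0 on success, -1 past EOF (no allocation when create == 0). */
    static int toy_get_block(const struct toy_inode *ino, uint64_t lblock,
                             uint64_t *phys, int create)
    {
        (void)create;
        if (lblock >= ino->nblocks)
            return -1;
        *phys = ino->start_block + lblock;
        return 0;
    }

    static uint64_t toy_bmap(const struct toy_inode *ino, uint64_t lblock)
    {
        uint64_t phys = 0;

        if (toy_get_block(ino, lblock, &phys, 0))
            return 0;   /* 0 means "unmapped", as with generic_block_bmap() */
        return phys;
    }

    int main(void)
    {
        struct toy_inode ino = { .start_block = 1000, .nblocks = 8 };

        printf("logical 3 -> physical %llu\n",
               (unsigned long long)toy_bmap(&ino, 3));  /* 1003 */
        printf("logical 9 -> physical %llu\n",
               (unsigned long long)toy_bmap(&ino, 9));  /* 0: past EOF */
        return 0;
    }
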
block             159 fs/affs/affs.h extern void	affs_free_block(struct super_block *sb, u32 block);
block             217 fs/affs/affs.h static inline bool affs_validblock(struct super_block *sb, int block)
block             219 fs/affs/affs.h 	return(block >= AFFS_SB(sb)->s_reserved &&
block             220 fs/affs/affs.h 	       block < AFFS_SB(sb)->s_partition_size);
block             229 fs/affs/affs.h affs_bread(struct super_block *sb, int block)
block             231 fs/affs/affs.h 	pr_debug("%s: %d\n", __func__, block);
block             232 fs/affs/affs.h 	if (affs_validblock(sb, block))
block             233 fs/affs/affs.h 		return sb_bread(sb, block);
block             237 fs/affs/affs.h affs_getblk(struct super_block *sb, int block)
block             239 fs/affs/affs.h 	pr_debug("%s: %d\n", __func__, block);
block             240 fs/affs/affs.h 	if (affs_validblock(sb, block))
block             241 fs/affs/affs.h 		return sb_getblk(sb, block);
block             245 fs/affs/affs.h affs_getzeroblk(struct super_block *sb, int block)
block             248 fs/affs/affs.h 	pr_debug("%s: %d\n", __func__, block);
block             249 fs/affs/affs.h 	if (affs_validblock(sb, block)) {
block             250 fs/affs/affs.h 		bh = sb_getblk(sb, block);
block             260 fs/affs/affs.h affs_getemptyblk(struct super_block *sb, int block)
block             263 fs/affs/affs.h 	pr_debug("%s: %d\n", __func__, block);
block             264 fs/affs/affs.h 	if (affs_validblock(sb, block)) {
block             265 fs/affs/affs.h 		bh = sb_getblk(sb, block);
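
The affs helpers above all funnel through the same guard: a block number is only handed to sb_bread or sb_getblk if it lies between the reserved area and the end of the partition. A tiny user-space sketch of that range check; toy_super is a stand-in for the AFFS superblock fields s_reserved and s_partition_size.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_super {
        uint32_t reserved;        /* first usable block */
        uint32_t partition_size;  /* one past the last usable block */
    };

    static bool toy_validblock(const struct toy_super *sb, uint32_t block)
    {
        return block >= sb->reserved && block < sb->partition_size;
    }

    int main(void)
    {
        struct toy_super sb = { .reserved = 2, .partition_size = 1024 };

        printf("block 1:    %s\n", toy_validblock(&sb, 1) ? "ok" : "rejected");
        printf("block 2:    %s\n", toy_validblock(&sb, 2) ? "ok" : "rejected");
        printf("block 1024: %s\n", toy_validblock(&sb, 1024) ? "ok" : "rejected");
        return 0;
    }
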
block              39 fs/affs/bitmap.c affs_free_block(struct super_block *sb, u32 block)
block              47 fs/affs/bitmap.c 	pr_debug("%s(%u)\n", __func__, block);
block              49 fs/affs/bitmap.c 	if (block > sbi->s_partition_size)
block              52 fs/affs/bitmap.c 	blk     = block - sbi->s_reserved;
block              90 fs/affs/bitmap.c 	affs_warning(sb,"affs_free_block","Trying to free block %u which is already free", block);
block             102 fs/affs/bitmap.c 	affs_error(sb, "affs_free_block","Block %u outside partition", block);
block             295 fs/affs/file.c affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
block             302 fs/affs/file.c 		 (unsigned long long)block);
block             304 fs/affs/file.c 	BUG_ON(block > (sector_t)0x7fffffffUL);
block             306 fs/affs/file.c 	if (block >= AFFS_I(inode)->i_blkcnt) {
block             307 fs/affs/file.c 		if (block > AFFS_I(inode)->i_blkcnt || !create)
block             315 fs/affs/file.c 	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
block             316 fs/affs/file.c 	block -= ext * AFFS_SB(sb)->s_hashsize;
block             320 fs/affs/file.c 	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));
block             335 fs/affs/file.c 		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
block             336 fs/affs/file.c 		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
block             340 fs/affs/file.c 		if (!block) {
block             357 fs/affs/file.c 		   (unsigned long long)block);
block             431 fs/affs/file.c static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
block             433 fs/affs/file.c 	return generic_block_bmap(mapping,block,affs_get_block);
block             446 fs/affs/file.c affs_bread_ino(struct inode *inode, int block, int create)
block             452 fs/affs/file.c 	err = affs_get_block(inode, block, &tmp_bh, create);
block             465 fs/affs/file.c affs_getzeroblk_ino(struct inode *inode, int block)
block             471 fs/affs/file.c 	err = affs_get_block(inode, block, &tmp_bh, 1);
block             484 fs/affs/file.c affs_getemptyblk_ino(struct inode *inode, int block)
block             490 fs/affs/file.c 	err = affs_get_block(inode, block, &tmp_bh, 1);
block              24 fs/affs/inode.c 	u32			 block;
block              37 fs/affs/inode.c 	block = inode->i_ino;
block              38 fs/affs/inode.c 	bh = affs_bread(sb, block);
block              40 fs/affs/inode.c 		affs_warning(sb, "read_inode", "Cannot read block %d", block);
block              46 fs/affs/inode.c 			   AFFS_HEAD(bh)->ptype, block);
block             297 fs/affs/inode.c 	u32			 block;
block             303 fs/affs/inode.c 	if (!(block = affs_alloc_block(dir, dir->i_ino)))
block             306 fs/affs/inode.c 	bh = affs_getzeroblk(sb, block);
block             314 fs/affs/inode.c 	inode->i_ino     = block;
block             337 fs/affs/inode.c 	affs_free_block(sb, block);
block             355 fs/affs/inode.c 	u32 block = 0;
block             371 fs/affs/inode.c 		block = affs_alloc_block(dir, dir->i_ino);
block             372 fs/affs/inode.c 		if (!block)
block             376 fs/affs/inode.c 		bh = affs_getzeroblk(sb, block);
block             395 fs/affs/inode.c 		AFFS_TAIL(sb, inode_bh)->link_chain = cpu_to_be32(block);
block             396 fs/affs/inode.c 		affs_adjust_checksum(inode_bh, block - be32_to_cpu(chain));
block             417 fs/affs/inode.c 	if (block)
block             418 fs/affs/inode.c 		affs_free_block(sb, block);
block             184 fs/afs/dir.c   			union afs_xdr_dir_block *block = &dbuf->blocks[j];
block             186 fs/afs/dir.c   			pr_warn("[%02x] %32phN\n", i * qty + j, block);
block             352 fs/afs/dir.c   				 union afs_xdr_dir_block *block,
block             360 fs/afs/dir.c   	_enter("%u,%x,%p,,",(unsigned)ctx->pos,blkoff,block);
block             372 fs/afs/dir.c   		if (!(block->hdr.bitmap[offset / 8] &
block             383 fs/afs/dir.c   		dire = &block->dirents[offset];
block             385 fs/afs/dir.c   			       sizeof(*block) -
block             403 fs/afs/dir.c   			if (!(block->hdr.bitmap[next / 8] &
block              23 fs/afs/dir_edit.c static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_slots)
block              29 fs/afs/dir_edit.c 	bitmap  = (u64)block->hdr.bitmap[0] << 0 * 8;
block              30 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[1] << 1 * 8;
block              31 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[2] << 2 * 8;
block              32 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[3] << 3 * 8;
block              33 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[4] << 4 * 8;
block              34 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[5] << 5 * 8;
block              35 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[6] << 6 * 8;
block              36 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[7] << 7 * 8;
block              68 fs/afs/dir_edit.c static void afs_set_contig_bits(union afs_xdr_dir_block *block,
block              76 fs/afs/dir_edit.c 	block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8);
block              77 fs/afs/dir_edit.c 	block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8);
block              78 fs/afs/dir_edit.c 	block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8);
block              79 fs/afs/dir_edit.c 	block->hdr.bitmap[3] |= (u8)(mask >> 3 * 8);
block              80 fs/afs/dir_edit.c 	block->hdr.bitmap[4] |= (u8)(mask >> 4 * 8);
block              81 fs/afs/dir_edit.c 	block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8);
block              82 fs/afs/dir_edit.c 	block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8);
block              83 fs/afs/dir_edit.c 	block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8);
block              89 fs/afs/dir_edit.c static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
block              97 fs/afs/dir_edit.c 	block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8);
block              98 fs/afs/dir_edit.c 	block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8);
block              99 fs/afs/dir_edit.c 	block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8);
block             100 fs/afs/dir_edit.c 	block->hdr.bitmap[3] &= ~(u8)(mask >> 3 * 8);
block             101 fs/afs/dir_edit.c 	block->hdr.bitmap[4] &= ~(u8)(mask >> 4 * 8);
block             102 fs/afs/dir_edit.c 	block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8);
block             103 fs/afs/dir_edit.c 	block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8);
block             104 fs/afs/dir_edit.c 	block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8);
block             110 fs/afs/dir_edit.c static int afs_dir_scan_block(union afs_xdr_dir_block *block, struct qstr *name,
block             119 fs/afs/dir_edit.c 	bitmap  = (u64)block->hdr.bitmap[0] << 0 * 8;
block             120 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[1] << 1 * 8;
block             121 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[2] << 2 * 8;
block             122 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[3] << 3 * 8;
block             123 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[4] << 4 * 8;
block             124 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[5] << 5 * 8;
block             125 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[6] << 6 * 8;
block             126 fs/afs/dir_edit.c 	bitmap |= (u64)block->hdr.bitmap[7] << 7 * 8;
block             133 fs/afs/dir_edit.c 		de = &block->dirents[d];
block             156 fs/afs/dir_edit.c 				union afs_xdr_dir_block *block, int block_num)
block             158 fs/afs/dir_edit.c 	memset(block, 0, sizeof(*block));
block             159 fs/afs/dir_edit.c 	block->hdr.npages = htons(1);
block             160 fs/afs/dir_edit.c 	block->hdr.magic = AFS_DIR_MAGIC;
block             161 fs/afs/dir_edit.c 	block->hdr.bitmap[0] = 1;
block             164 fs/afs/dir_edit.c 		block->hdr.bitmap[0] = 0xff;
block             165 fs/afs/dir_edit.c 		block->hdr.bitmap[1] = 0x1f;
block             166 fs/afs/dir_edit.c 		memset(block->meta.alloc_ctrs,
block             168 fs/afs/dir_edit.c 		       sizeof(block->meta.alloc_ctrs));
block             190 fs/afs/dir_edit.c 	union afs_xdr_dir_block *meta, *block;
block             257 fs/afs/dir_edit.c 		block = &dir_page->blocks[b % AFS_DIR_BLOCKS_PER_PAGE];
block             262 fs/afs/dir_edit.c 		       ntohs(block->hdr.npages),
block             263 fs/afs/dir_edit.c 		       ntohs(block->hdr.magic));
block             268 fs/afs/dir_edit.c 			afs_edit_init_block(meta, block, b);
block             278 fs/afs/dir_edit.c 			slot = afs_find_contig_bits(block, need_slots);
block             305 fs/afs/dir_edit.c 	block = meta;
block             313 fs/afs/dir_edit.c 	de = &block->dirents[slot];
block             323 fs/afs/dir_edit.c 	afs_set_contig_bits(block, slot, need_slots);
block             371 fs/afs/dir_edit.c 	union afs_xdr_dir_block *meta, *block;
block             423 fs/afs/dir_edit.c 		block = &dir_page->blocks[b % AFS_DIR_BLOCKS_PER_PAGE];
block             427 fs/afs/dir_edit.c 			slot = afs_dir_scan_block(block, name, b);
block             446 fs/afs/dir_edit.c 	de = &block->dirents[slot];
block             455 fs/afs/dir_edit.c 	afs_clear_contig_bits(block, slot, need_slots);
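
The afs directory-editing code above treats the eight hdr.bitmap bytes as one 64-bit slot map: it folds them into a u64 to look for a run of free slots, then sets or clears a shifted mask byte by byte. The user-space sketch below mirrors that bookkeeping; the linear scan is simpler than the kernel's and the mask construction is inferred from the per-byte OR/AND lines of afs_set_contig_bits and afs_clear_contig_bits.

    #include <stdint.h>
    #include <stdio.h>

    #define NR_SLOTS 64

    static uint64_t fold_bitmap(const uint8_t bm[8])
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
            v |= (uint64_t)bm[i] << (i * 8);
        return v;
    }

    /* Return the first index of nr contiguous clear bits, or -1 if none. */
    static int find_contig_clear(uint64_t bitmap, unsigned int nr)
    {
        uint64_t mask = (nr >= 64) ? ~0ULL : ((1ULL << nr) - 1);

        for (int bit = 0; bit + (int)nr <= NR_SLOTS; bit++)
            if (!(bitmap & (mask << bit)))
                return bit;
        return -1;
    }

    static void set_contig(uint8_t bm[8], int bit, unsigned int nr)
    {
        uint64_t mask = ((1ULL << nr) - 1) << bit;
        for (int i = 0; i < 8; i++)
            bm[i] |= (uint8_t)(mask >> (i * 8));
    }

    static void clear_contig(uint8_t bm[8], int bit, unsigned int nr)
    {
        uint64_t mask = ((1ULL << nr) - 1) << bit;
        for (int i = 0; i < 8; i++)
            bm[i] &= ~(uint8_t)(mask >> (i * 8));
    }

    int main(void)
    {
        uint8_t bm[8] = { 0x1f, 0, 0, 0, 0, 0, 0, 0 };  /* slots 0-4 in use */
        int slot = find_contig_clear(fold_bitmap(bm), 3);

        printf("3 free slots start at %d\n", slot);     /* 5 */
        set_contig(bm, slot, 3);
        printf("bitmap[0] now 0x%02x\n", bm[0]);        /* 0xff */
        clear_contig(bm, slot, 3);
        printf("bitmap[0] back to 0x%02x\n", bm[0]);    /* 0x1f */
        return 0;
    }
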
block              54 fs/befs/datastream.c 	befs_blocknr_t block;	/* block corresponding to pos */
block              57 fs/befs/datastream.c 	block = pos >> BEFS_SB(sb)->block_shift;
block              59 fs/befs/datastream.c 		*off = pos - (block << BEFS_SB(sb)->block_shift);
block              61 fs/befs/datastream.c 	if (befs_fblock2brun(sb, ds, block, &run) != BEFS_OK) {
block              63 fs/befs/datastream.c 			   (unsigned long)block);
block              70 fs/befs/datastream.c 			   (unsigned long)block);
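
The befs datastream code above splits a byte position into a block number and an in-block offset using the superblock's block_shift. A tiny worked example; the 2 KiB block size is only an illustrative value.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int block_shift = 11;   /* block size = 1 << 11 = 2048, example value */
        uint64_t pos = 5000;

        uint64_t block = pos >> block_shift;
        uint64_t off   = pos - (block << block_shift);  /* same as pos & (2048 - 1) */

        printf("pos %llu -> block %llu, offset %llu\n",
               (unsigned long long)pos, (unsigned long long)block,
               (unsigned long long)off);                /* block 2, offset 904 */
        return 0;
    }
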
block              30 fs/befs/io.c   	befs_blocknr_t block;
block              43 fs/befs/io.c   	block = iaddr2blockno(sb, &iaddr);
block              45 fs/befs/io.c   	befs_debug(sb, "%s: offset = %lu", __func__, (unsigned long)block);
block              47 fs/befs/io.c   	bh = sb_bread(sb, block);
block              51 fs/befs/io.c   			   (unsigned long)block);
block              43 fs/befs/linuxvfs.c static sector_t befs_bmap(struct address_space *mapping, sector_t block);
block             117 fs/befs/linuxvfs.c befs_bmap(struct address_space *mapping, sector_t block)
block             119 fs/befs/linuxvfs.c 	return generic_block_bmap(mapping, block, befs_get_block);
block             132 fs/befs/linuxvfs.c befs_get_block(struct inode *inode, sector_t block,
block             142 fs/befs/linuxvfs.c 		   (unsigned long)inode->i_ino, (long)block);
block             145 fs/befs/linuxvfs.c 			   "block %ld in inode %lu", (long)block,
block             150 fs/befs/linuxvfs.c 	res = befs_fblock2brun(sb, ds, block, &run);
block             155 fs/befs/linuxvfs.c 			   (long)block);
block             164 fs/befs/linuxvfs.c 		  __func__, (unsigned long)inode->i_ino, (long)block,
block              35 fs/bfs/dir.c   	int block;
block              46 fs/bfs/dir.c   		block = BFS_I(dir)->i_sblock + (ctx->pos >> BFS_BSIZE_BITS);
block              47 fs/bfs/dir.c   		bh = sb_bread(dir->i_sb, block);
block             273 fs/bfs/dir.c   	int block, sblock, eblock, off, pos;
block             285 fs/bfs/dir.c   	for (block = sblock; block <= eblock; block++) {
block             286 fs/bfs/dir.c   		bh = sb_bread(dir->i_sb, block);
block             292 fs/bfs/dir.c   				pos = (block - sblock) * BFS_BSIZE + off;
block             325 fs/bfs/dir.c   	unsigned long block = 0, offset = 0;
block             335 fs/bfs/dir.c   	while (block * BFS_BSIZE + offset < dir->i_size) {
block             337 fs/bfs/dir.c   			bh = sb_bread(dir->i_sb, BFS_I(dir)->i_sblock + block);
block             339 fs/bfs/dir.c   				block++;
block             355 fs/bfs/dir.c   		block++;
block              64 fs/bfs/file.c  static int bfs_get_block(struct inode *inode, sector_t block,
block              73 fs/bfs/file.c  	phys = bi->i_sblock + block;
block              77 fs/bfs/file.c                                  create, (unsigned long)block, phys);
block              89 fs/bfs/file.c  				create, (unsigned long)block, phys);
block             108 fs/bfs/file.c  				create, (unsigned long)block, phys);
block             119 fs/bfs/file.c  	if (phys + block >= info->si_blocks) {
block             136 fs/bfs/file.c                  create, (unsigned long)block, phys);
block             138 fs/bfs/file.c  	phys += block;
block             185 fs/bfs/file.c  static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
block             187 fs/bfs/file.c  	return generic_block_bmap(mapping, block, bfs_get_block);
block              39 fs/bfs/inode.c 	int block, off;
block              52 fs/bfs/inode.c 	block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
block              53 fs/bfs/inode.c 	bh = sb_bread(inode->i_sb, block);
block             391 fs/bfs/inode.c 		int block = (i - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
block             397 fs/bfs/inode.c 			bh = sb_bread(s, block);
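
fs/bfs/inode.c above locates an on-disk inode with a divide and a modulo: inodes are packed several per block starting right after the superblock, so the "+ 1" skips block 0. A small worked example; ROOT_INO and INODES_PER_BLOCK are example values rather than the real bfs.h constants.

    #include <stdio.h>

    #define ROOT_INO         2   /* example value */
    #define INODES_PER_BLOCK 8   /* example value */

    int main(void)
    {
        unsigned long ino = 21;
        int block = (ino - ROOT_INO) / INODES_PER_BLOCK + 1;
        int off   = (ino - ROOT_INO) % INODES_PER_BLOCK;

        printf("inode %lu lives in block %d, slot %d\n", ino, block, off);
        /* (21 - 2) / 8 + 1 = 3, (21 - 2) % 8 = 3 */
        return 0;
    }
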
block             228 fs/btrfs/check-integrity.c 	struct btrfsic_block *block;
block             293 fs/btrfs/check-integrity.c 				     struct btrfsic_block *block,
block             301 fs/btrfs/check-integrity.c 		struct btrfsic_block *block,
block             312 fs/btrfs/check-integrity.c 				      struct btrfsic_block *block,
block             332 fs/btrfs/check-integrity.c 		struct btrfsic_block *const block,
block             337 fs/btrfs/check-integrity.c 					      const struct btrfsic_block *block,
block             340 fs/btrfs/check-integrity.c 					struct btrfsic_block *const block,
block             347 fs/btrfs/check-integrity.c 				   const struct btrfsic_block *block);
block             350 fs/btrfs/check-integrity.c 				  const struct btrfsic_block *block,
block             959 fs/btrfs/check-integrity.c 	sf->block = first_block;
block             966 fs/btrfs/check-integrity.c 	sf->block->generation = le64_to_cpu(sf->hdr->generation);
block            1035 fs/btrfs/check-integrity.c 						sf->block,
block            1066 fs/btrfs/check-integrity.c 					next_stack->block = sf->next_block;
block            1081 fs/btrfs/check-integrity.c 						sf->block,
block            1134 fs/btrfs/check-integrity.c 					sf->block,
block            1160 fs/btrfs/check-integrity.c 				next_stack->block = sf->next_block;
block            1228 fs/btrfs/check-integrity.c 		struct btrfsic_block *block,
block            1328 fs/btrfs/check-integrity.c 		l->block_ref_from = block;
block            1335 fs/btrfs/check-integrity.c 		list_add(&l->node_ref_to, &block->ref_to_list);
block            1371 fs/btrfs/check-integrity.c 		struct btrfsic_block *block,
block            1500 fs/btrfs/check-integrity.c 							     next_block, block,
block            1751 fs/btrfs/check-integrity.c 	struct btrfsic_block *block;
block            1769 fs/btrfs/check-integrity.c 	block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
block            1771 fs/btrfs/check-integrity.c 	if (NULL != block) {
block            1775 fs/btrfs/check-integrity.c 		if (block->is_superblock) {
block            1789 fs/btrfs/check-integrity.c 				btrfsic_dump_tree_sub(state, block, 0);
block            1793 fs/btrfs/check-integrity.c 			if (!block->is_superblock) {
block            1808 fs/btrfs/check-integrity.c 				if (block->logical_bytenr != bytenr &&
block            1809 fs/btrfs/check-integrity.c 				    !(!block->is_metadata &&
block            1810 fs/btrfs/check-integrity.c 				      block->logical_bytenr == 0))
block            1814 fs/btrfs/check-integrity.c 					       block->mirror_num,
block            1816 fs/btrfs/check-integrity.c 								      block),
block            1817 fs/btrfs/check-integrity.c 					       block->logical_bytenr);
block            1821 fs/btrfs/check-integrity.c 					       dev_bytenr, block->mirror_num,
block            1823 fs/btrfs/check-integrity.c 								      block));
block            1825 fs/btrfs/check-integrity.c 			block->logical_bytenr = bytenr;
block            1833 fs/btrfs/check-integrity.c 			bytenr = block->logical_bytenr;
block            1837 fs/btrfs/check-integrity.c 				       block->mirror_num,
block            1838 fs/btrfs/check-integrity.c 				       btrfsic_get_block_type(state, block));
block            1843 fs/btrfs/check-integrity.c 			       list_empty(&block->ref_to_list) ? ' ' : '!',
block            1844 fs/btrfs/check-integrity.c 			       list_empty(&block->ref_from_list) ? ' ' : '!');
block            1845 fs/btrfs/check-integrity.c 		if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
block            1847 fs/btrfs/check-integrity.c 			       btrfsic_get_block_type(state, block), bytenr,
block            1848 fs/btrfs/check-integrity.c 			       dev_state->name, dev_bytenr, block->mirror_num,
block            1849 fs/btrfs/check-integrity.c 			       block->generation,
block            1850 fs/btrfs/check-integrity.c 			       btrfs_disk_key_objectid(&block->disk_key),
block            1851 fs/btrfs/check-integrity.c 			       block->disk_key.type,
block            1852 fs/btrfs/check-integrity.c 			       btrfs_disk_key_offset(&block->disk_key),
block            1859 fs/btrfs/check-integrity.c 		if (!block->is_iodone && !block->never_written) {
block            1861 fs/btrfs/check-integrity.c 			       btrfsic_get_block_type(state, block), bytenr,
block            1862 fs/btrfs/check-integrity.c 			       dev_state->name, dev_bytenr, block->mirror_num,
block            1863 fs/btrfs/check-integrity.c 			       block->generation,
block            1878 fs/btrfs/check-integrity.c 		list_for_each_entry_safe(l, tmp, &block->ref_to_list,
block            1900 fs/btrfs/check-integrity.c 			block->never_written = 0;
block            1901 fs/btrfs/check-integrity.c 			block->iodone_w_error = 0;
block            1903 fs/btrfs/check-integrity.c 				block->is_iodone = 0;
block            1906 fs/btrfs/check-integrity.c 					block->orig_bio_bh_private =
block            1908 fs/btrfs/check-integrity.c 					block->orig_bio_bh_end_io.bio =
block            1910 fs/btrfs/check-integrity.c 					block->next_in_same_bio = NULL;
block            1911 fs/btrfs/check-integrity.c 					bio->bi_private = block;
block            1920 fs/btrfs/check-integrity.c 					block->orig_bio_bh_private =
block            1922 fs/btrfs/check-integrity.c 					block->orig_bio_bh_end_io.bio =
block            1925 fs/btrfs/check-integrity.c 					block->next_in_same_bio = chained_block;
block            1926 fs/btrfs/check-integrity.c 					bio->bi_private = block;
block            1929 fs/btrfs/check-integrity.c 				block->is_iodone = 0;
block            1930 fs/btrfs/check-integrity.c 				block->orig_bio_bh_private = bh->b_private;
block            1931 fs/btrfs/check-integrity.c 				block->orig_bio_bh_end_io.bh = bh->b_end_io;
block            1932 fs/btrfs/check-integrity.c 				block->next_in_same_bio = NULL;
block            1933 fs/btrfs/check-integrity.c 				bh->b_private = block;
block            1936 fs/btrfs/check-integrity.c 				block->is_iodone = 1;
block            1937 fs/btrfs/check-integrity.c 				block->orig_bio_bh_private = NULL;
block            1938 fs/btrfs/check-integrity.c 				block->orig_bio_bh_end_io.bio = NULL;
block            1939 fs/btrfs/check-integrity.c 				block->next_in_same_bio = NULL;
block            1943 fs/btrfs/check-integrity.c 		block->flush_gen = dev_state->last_flush_gen + 1;
block            1944 fs/btrfs/check-integrity.c 		block->submit_bio_bh_rw = submit_bio_bh_rw;
block            1946 fs/btrfs/check-integrity.c 			block->logical_bytenr = bytenr;
block            1947 fs/btrfs/check-integrity.c 			block->is_metadata = 1;
block            1948 fs/btrfs/check-integrity.c 			if (block->is_superblock) {
block            1953 fs/btrfs/check-integrity.c 						block,
block            1959 fs/btrfs/check-integrity.c 					btrfsic_dump_tree_sub(state, block, 0);
block            1962 fs/btrfs/check-integrity.c 				block->mirror_num = 0;	/* unknown */
block            1965 fs/btrfs/check-integrity.c 						block,
block            1973 fs/btrfs/check-integrity.c 			block->is_metadata = 0;
block            1974 fs/btrfs/check-integrity.c 			block->mirror_num = 0;	/* unknown */
block            1975 fs/btrfs/check-integrity.c 			block->generation = BTRFSIC_GENERATION_UNKNOWN;
block            1977 fs/btrfs/check-integrity.c 			    && list_empty(&block->ref_from_list)) {
block            1984 fs/btrfs/check-integrity.c 				btrfsic_block_hashtable_remove(block);
block            1985 fs/btrfs/check-integrity.c 				list_del(&block->all_blocks_node);
block            1986 fs/btrfs/check-integrity.c 				btrfsic_block_free(block);
block            2027 fs/btrfs/check-integrity.c 		block = btrfsic_block_alloc();
block            2028 fs/btrfs/check-integrity.c 		if (NULL == block) {
block            2033 fs/btrfs/check-integrity.c 		block->dev_state = dev_state;
block            2034 fs/btrfs/check-integrity.c 		block->dev_bytenr = dev_bytenr;
block            2035 fs/btrfs/check-integrity.c 		block->logical_bytenr = bytenr;
block            2036 fs/btrfs/check-integrity.c 		block->is_metadata = is_metadata;
block            2037 fs/btrfs/check-integrity.c 		block->never_written = 0;
block            2038 fs/btrfs/check-integrity.c 		block->iodone_w_error = 0;
block            2039 fs/btrfs/check-integrity.c 		block->mirror_num = 0;	/* unknown */
block            2040 fs/btrfs/check-integrity.c 		block->flush_gen = dev_state->last_flush_gen + 1;
block            2041 fs/btrfs/check-integrity.c 		block->submit_bio_bh_rw = submit_bio_bh_rw;
block            2043 fs/btrfs/check-integrity.c 			block->is_iodone = 0;
block            2046 fs/btrfs/check-integrity.c 				block->orig_bio_bh_private = bio->bi_private;
block            2047 fs/btrfs/check-integrity.c 				block->orig_bio_bh_end_io.bio = bio->bi_end_io;
block            2048 fs/btrfs/check-integrity.c 				block->next_in_same_bio = NULL;
block            2049 fs/btrfs/check-integrity.c 				bio->bi_private = block;
block            2058 fs/btrfs/check-integrity.c 				block->orig_bio_bh_private =
block            2060 fs/btrfs/check-integrity.c 				block->orig_bio_bh_end_io.bio =
block            2062 fs/btrfs/check-integrity.c 				block->next_in_same_bio = chained_block;
block            2063 fs/btrfs/check-integrity.c 				bio->bi_private = block;
block            2066 fs/btrfs/check-integrity.c 			block->is_iodone = 0;
block            2067 fs/btrfs/check-integrity.c 			block->orig_bio_bh_private = bh->b_private;
block            2068 fs/btrfs/check-integrity.c 			block->orig_bio_bh_end_io.bh = bh->b_end_io;
block            2069 fs/btrfs/check-integrity.c 			block->next_in_same_bio = NULL;
block            2070 fs/btrfs/check-integrity.c 			bh->b_private = block;
block            2073 fs/btrfs/check-integrity.c 			block->is_iodone = 1;
block            2074 fs/btrfs/check-integrity.c 			block->orig_bio_bh_private = NULL;
block            2075 fs/btrfs/check-integrity.c 			block->orig_bio_bh_end_io.bio = NULL;
block            2076 fs/btrfs/check-integrity.c 			block->next_in_same_bio = NULL;
block            2081 fs/btrfs/check-integrity.c 			       block->logical_bytenr, block->dev_state->name,
block            2082 fs/btrfs/check-integrity.c 			       block->dev_bytenr, block->mirror_num);
block            2083 fs/btrfs/check-integrity.c 		list_add(&block->all_blocks_node, &state->all_blocks_list);
block            2084 fs/btrfs/check-integrity.c 		btrfsic_block_hashtable_add(block, &state->block_hashtable);
block            2087 fs/btrfs/check-integrity.c 			ret = btrfsic_process_metablock(state, block,
block            2106 fs/btrfs/check-integrity.c 	struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
block            2115 fs/btrfs/check-integrity.c 	BUG_ON(NULL == block);
block            2116 fs/btrfs/check-integrity.c 	bp->bi_private = block->orig_bio_bh_private;
block            2117 fs/btrfs/check-integrity.c 	bp->bi_end_io = block->orig_bio_bh_end_io.bio;
block            2121 fs/btrfs/check-integrity.c 		struct btrfsic_dev_state *const dev_state = block->dev_state;
block            2127 fs/btrfs/check-integrity.c 			       btrfsic_get_block_type(dev_state->state, block),
block            2128 fs/btrfs/check-integrity.c 			       block->logical_bytenr, dev_state->name,
block            2129 fs/btrfs/check-integrity.c 			       block->dev_bytenr, block->mirror_num);
block            2130 fs/btrfs/check-integrity.c 		next_block = block->next_in_same_bio;
block            2131 fs/btrfs/check-integrity.c 		block->iodone_w_error = iodone_w_error;
block            2132 fs/btrfs/check-integrity.c 		if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
block            2140 fs/btrfs/check-integrity.c 		if (block->submit_bio_bh_rw & REQ_FUA)
block            2141 fs/btrfs/check-integrity.c 			block->flush_gen = 0; /* FUA completed means block is
block            2143 fs/btrfs/check-integrity.c 		block->is_iodone = 1; /* for FLUSH, this releases the block */
block            2144 fs/btrfs/check-integrity.c 		block = next_block;
block            2145 fs/btrfs/check-integrity.c 	} while (NULL != block);
block            2152 fs/btrfs/check-integrity.c 	struct btrfsic_block *block = (struct btrfsic_block *)bh->b_private;
block            2156 fs/btrfs/check-integrity.c 	BUG_ON(NULL == block);
block            2157 fs/btrfs/check-integrity.c 	dev_state = block->dev_state;
block            2161 fs/btrfs/check-integrity.c 		       btrfsic_get_block_type(dev_state->state, block),
block            2162 fs/btrfs/check-integrity.c 		       block->logical_bytenr, block->dev_state->name,
block            2163 fs/btrfs/check-integrity.c 		       block->dev_bytenr, block->mirror_num);
block            2165 fs/btrfs/check-integrity.c 	block->iodone_w_error = iodone_w_error;
block            2166 fs/btrfs/check-integrity.c 	if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
block            2173 fs/btrfs/check-integrity.c 	if (block->submit_bio_bh_rw & REQ_FUA)
block            2174 fs/btrfs/check-integrity.c 		block->flush_gen = 0; /* FUA completed means block is on disk */
block            2176 fs/btrfs/check-integrity.c 	bh->b_private = block->orig_bio_bh_private;
block            2177 fs/btrfs/check-integrity.c 	bh->b_end_io = block->orig_bio_bh_end_io.bh;
block            2178 fs/btrfs/check-integrity.c 	block->is_iodone = 1; /* for FLUSH, this releases the block */
block            2317 fs/btrfs/check-integrity.c 					struct btrfsic_block *const block,
block            2347 fs/btrfs/check-integrity.c 	list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
block            2351 fs/btrfs/check-integrity.c 			       btrfsic_get_block_type(state, block),
block            2352 fs/btrfs/check-integrity.c 			       block->logical_bytenr, block->dev_state->name,
block            2353 fs/btrfs/check-integrity.c 			       block->dev_bytenr, block->mirror_num,
block            2406 fs/btrfs/check-integrity.c 			       l->block_ref_to->mirror_num, block->flush_gen,
block            2422 fs/btrfs/check-integrity.c 		const struct btrfsic_block *block,
block            2439 fs/btrfs/check-integrity.c 	list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
block            2443 fs/btrfs/check-integrity.c 			       btrfsic_get_block_type(state, block),
block            2444 fs/btrfs/check-integrity.c 			       block->logical_bytenr, block->dev_state->name,
block            2445 fs/btrfs/check-integrity.c 			       block->dev_bytenr, block->mirror_num,
block            2499 fs/btrfs/check-integrity.c 				   const struct btrfsic_block *block)
block            2501 fs/btrfs/check-integrity.c 	if (block->is_superblock &&
block            2502 fs/btrfs/check-integrity.c 	    state->latest_superblock->dev_bytenr == block->dev_bytenr &&
block            2503 fs/btrfs/check-integrity.c 	    state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
block            2505 fs/btrfs/check-integrity.c 	else if (block->is_superblock)
block            2507 fs/btrfs/check-integrity.c 	else if (block->is_metadata)
block            2519 fs/btrfs/check-integrity.c 				  const struct btrfsic_block *block,
block            2537 fs/btrfs/check-integrity.c 			     btrfsic_get_block_type(state, block),
block            2538 fs/btrfs/check-integrity.c 			     block->logical_bytenr, block->dev_state->name,
block            2539 fs/btrfs/check-integrity.c 			     block->dev_bytenr, block->mirror_num);
block            2546 fs/btrfs/check-integrity.c 	if (list_empty(&block->ref_to_list)) {
block            2550 fs/btrfs/check-integrity.c 	if (block->mirror_num > 1 &&
block            2557 fs/btrfs/check-integrity.c 	list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
block            2635 fs/btrfs/check-integrity.c 	struct btrfsic_block *block;
block            2637 fs/btrfs/check-integrity.c 	block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
block            2640 fs/btrfs/check-integrity.c 	if (NULL == block) {
block            2643 fs/btrfs/check-integrity.c 		block = btrfsic_block_alloc();
block            2644 fs/btrfs/check-integrity.c 		if (NULL == block) {
block            2651 fs/btrfs/check-integrity.c 			btrfsic_block_free(block);
block            2654 fs/btrfs/check-integrity.c 		block->dev_state = dev_state;
block            2655 fs/btrfs/check-integrity.c 		block->dev_bytenr = block_ctx->dev_bytenr;
block            2656 fs/btrfs/check-integrity.c 		block->logical_bytenr = block_ctx->start;
block            2657 fs/btrfs/check-integrity.c 		block->is_metadata = is_metadata;
block            2658 fs/btrfs/check-integrity.c 		block->is_iodone = is_iodone;
block            2659 fs/btrfs/check-integrity.c 		block->never_written = never_written;
block            2660 fs/btrfs/check-integrity.c 		block->mirror_num = mirror_num;
block            2664 fs/btrfs/check-integrity.c 			       btrfsic_get_block_type(state, block),
block            2665 fs/btrfs/check-integrity.c 			       block->logical_bytenr, dev_state->name,
block            2666 fs/btrfs/check-integrity.c 			       block->dev_bytenr, mirror_num);
block            2667 fs/btrfs/check-integrity.c 		list_add(&block->all_blocks_node, &state->all_blocks_list);
block            2668 fs/btrfs/check-integrity.c 		btrfsic_block_hashtable_add(block, &state->block_hashtable);
block            2676 fs/btrfs/check-integrity.c 	return block;
block            2772 fs/btrfs/check-integrity.c 			struct btrfsic_block *const block =
block            2775 fs/btrfs/check-integrity.c 			block->is_iodone = 0;
block            2776 fs/btrfs/check-integrity.c 			block->never_written = 0;
block            2777 fs/btrfs/check-integrity.c 			block->iodone_w_error = 0;
block            2778 fs/btrfs/check-integrity.c 			block->flush_gen = dev_state->last_flush_gen + 1;
block            2779 fs/btrfs/check-integrity.c 			block->submit_bio_bh_rw = op_flags;
block            2780 fs/btrfs/check-integrity.c 			block->orig_bio_bh_private = bh->b_private;
block            2781 fs/btrfs/check-integrity.c 			block->orig_bio_bh_end_io.bh = bh->b_end_io;
block            2782 fs/btrfs/check-integrity.c 			block->next_in_same_bio = NULL;
block            2783 fs/btrfs/check-integrity.c 			bh->b_private = block;
block            2858 fs/btrfs/check-integrity.c 			struct btrfsic_block *const block =
block            2861 fs/btrfs/check-integrity.c 			block->is_iodone = 0;
block            2862 fs/btrfs/check-integrity.c 			block->never_written = 0;
block            2863 fs/btrfs/check-integrity.c 			block->iodone_w_error = 0;
block            2864 fs/btrfs/check-integrity.c 			block->flush_gen = dev_state->last_flush_gen + 1;
block            2865 fs/btrfs/check-integrity.c 			block->submit_bio_bh_rw = bio->bi_opf;
block            2866 fs/btrfs/check-integrity.c 			block->orig_bio_bh_private = bio->bi_private;
block            2867 fs/btrfs/check-integrity.c 			block->orig_bio_bh_end_io.bio = bio->bi_end_io;
block            2868 fs/btrfs/check-integrity.c 			block->next_in_same_bio = NULL;
block            2869 fs/btrfs/check-integrity.c 			bio->bi_private = block;
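
A recurring move in the check-integrity code above is callback interposition: before submitting a bio or buffer head, btrfsic saves the original bi_private/b_private and end-io handler in its tracking block, points the request at itself, and its own completion handler restores the originals before chaining to them. Below is a self-contained user-space sketch of that pattern; toy_request, track_request and the other names are invented.

    #include <stdio.h>

    struct toy_request {
        void (*end_io)(struct toy_request *req, int error);
        void *private;
    };

    struct tracking_block {
        void (*orig_end_io)(struct toy_request *req, int error);
        void *orig_private;
        int iodone_w_error;
        int is_iodone;
    };

    static void fs_end_io(struct toy_request *req, int error)
    {
        printf("filesystem completion, error=%d, private=%s\n",
               error, (char *)req->private);
    }

    /* Interposed handler: record status, restore originals, chain to them. */
    static void tracker_end_io(struct toy_request *req, int error)
    {
        struct tracking_block *block = req->private;

        block->iodone_w_error = error;
        block->is_iodone = 1;
        req->private = block->orig_private;
        req->end_io  = block->orig_end_io;
        req->end_io(req, error);
    }

    static void track_request(struct toy_request *req, struct tracking_block *block)
    {
        block->orig_private = req->private;
        block->orig_end_io  = req->end_io;
        block->is_iodone = 0;
        req->private = block;
        req->end_io  = tracker_end_io;
    }

    int main(void)
    {
        struct toy_request req = { .end_io = fs_end_io, .private = "fs-context" };
        struct tracking_block block;

        track_request(&req, &block);
        req.end_io(&req, 0);   /* "device" completes: tracker runs, then fs_end_io */
        printf("tracker saw iodone=%d error=%d\n", block.is_iodone, block.iodone_w_error);
        return 0;
    }
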
block            3846 fs/btrfs/qgroup.c 	struct btrfs_qgroup_swapped_block *block;
block            3865 fs/btrfs/qgroup.c 	block = kmalloc(sizeof(*block), GFP_NOFS);
block            3866 fs/btrfs/qgroup.c 	if (!block) {
block            3875 fs/btrfs/qgroup.c 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
block            3876 fs/btrfs/qgroup.c 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
block            3878 fs/btrfs/qgroup.c 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
block            3879 fs/btrfs/qgroup.c 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
block            3881 fs/btrfs/qgroup.c 	block->last_snapshot = last_snapshot;
block            3882 fs/btrfs/qgroup.c 	block->level = level;
block            3890 fs/btrfs/qgroup.c 		block->trace_leaf = true;
block            3892 fs/btrfs/qgroup.c 		block->trace_leaf = false;
block            3893 fs/btrfs/qgroup.c 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
block            3905 fs/btrfs/qgroup.c 		if (entry->subvol_bytenr < block->subvol_bytenr) {
block            3907 fs/btrfs/qgroup.c 		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
block            3911 fs/btrfs/qgroup.c 					block->subvol_generation ||
block            3912 fs/btrfs/qgroup.c 			    entry->reloc_bytenr != block->reloc_bytenr ||
block            3914 fs/btrfs/qgroup.c 					block->reloc_generation) {
block            3925 fs/btrfs/qgroup.c 			kfree(block);
block            3929 fs/btrfs/qgroup.c 	rb_link_node(&block->node, parent, cur);
block            3930 fs/btrfs/qgroup.c 	rb_insert_color(&block->node, &blocks->blocks[level]);
block            3953 fs/btrfs/qgroup.c 	struct btrfs_qgroup_swapped_block *block;
block            3975 fs/btrfs/qgroup.c 		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
block            3976 fs/btrfs/qgroup.c 		if (block->subvol_bytenr < subvol_eb->start) {
block            3978 fs/btrfs/qgroup.c 		} else if (block->subvol_bytenr > subvol_eb->start) {
block            3990 fs/btrfs/qgroup.c 	rb_erase(&block->node, &blocks->blocks[level]);
block            4001 fs/btrfs/qgroup.c 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
block            4002 fs/btrfs/qgroup.c 				   block->reloc_generation, block->level,
block            4003 fs/btrfs/qgroup.c 				   &block->first_key);
block            4015 fs/btrfs/qgroup.c 			block->last_snapshot, block->trace_leaf);
block            4017 fs/btrfs/qgroup.c 	kfree(block);
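
The qgroup code above records swapped subvolume/reloc tree blocks in a per-level rbtree keyed on subvol_bytenr, walking down to find the insertion point and freeing the new node if an equivalent entry is already there (after sanity-checking the generations). A sketch of that walk follows; a plain unbalanced binary search tree stands in for the kernel's rb_link_node/rb_insert_color so the example stays self-contained.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct swapped_block {
        uint64_t subvol_bytenr;
        struct swapped_block *left, *right;
    };

    /* Returns the inserted node, or the existing one if the key is already present. */
    static struct swapped_block *insert_block(struct swapped_block **root,
                                              struct swapped_block *new)
    {
        struct swapped_block **cur = root;

        while (*cur) {
            if (new->subvol_bytenr < (*cur)->subvol_bytenr) {
                cur = &(*cur)->left;
            } else if (new->subvol_bytenr > (*cur)->subvol_bytenr) {
                cur = &(*cur)->right;
            } else {
                free(new);          /* duplicate: keep the existing entry */
                return *cur;
            }
        }
        *cur = new;                 /* cf. rb_link_node() at the slot found by the walk */
        return new;
    }

    static struct swapped_block *make_block(uint64_t bytenr)
    {
        struct swapped_block *b = calloc(1, sizeof(*b));
        b->subvol_bytenr = bytenr;
        return b;
    }

    int main(void)
    {
        struct swapped_block *root = NULL;

        insert_block(&root, make_block(4096));
        insert_block(&root, make_block(8192));
        struct swapped_block *dup = insert_block(&root, make_block(4096));

        printf("duplicate resolved to existing node with bytenr %llu\n",
               (unsigned long long)dup->subvol_bytenr);
        return 0;
    }
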
block            2586 fs/btrfs/relocation.c 	struct tree_block *block;
block            2589 fs/btrfs/relocation.c 		block = rb_entry(rb_node, struct tree_block, rb_node);
block            2591 fs/btrfs/relocation.c 		kfree(block);
block            3056 fs/btrfs/relocation.c 			      struct tree_block *block)
block            3060 fs/btrfs/relocation.c 	BUG_ON(block->key_ready);
block            3061 fs/btrfs/relocation.c 	eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
block            3062 fs/btrfs/relocation.c 			     block->level, NULL);
block            3069 fs/btrfs/relocation.c 	if (block->level == 0)
block            3070 fs/btrfs/relocation.c 		btrfs_item_key_to_cpu(eb, &block->key, 0);
block            3072 fs/btrfs/relocation.c 		btrfs_node_key_to_cpu(eb, &block->key, 0);
block            3074 fs/btrfs/relocation.c 	block->key_ready = 1;
block            3143 fs/btrfs/relocation.c 	struct tree_block *block;
block            3155 fs/btrfs/relocation.c 	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
block            3156 fs/btrfs/relocation.c 		if (!block->key_ready)
block            3157 fs/btrfs/relocation.c 			readahead_tree_block(fs_info, block->bytenr);
block            3161 fs/btrfs/relocation.c 	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
block            3162 fs/btrfs/relocation.c 		if (!block->key_ready) {
block            3163 fs/btrfs/relocation.c 			err = get_tree_block_key(fs_info, block);
block            3170 fs/btrfs/relocation.c 	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
block            3171 fs/btrfs/relocation.c 		node = build_backref_tree(rc, &block->key,
block            3172 fs/btrfs/relocation.c 					  block->level, block->bytenr);
block            3178 fs/btrfs/relocation.c 		ret = relocate_tree_block(trans, rc, node, &block->key,
block            3455 fs/btrfs/relocation.c 	struct tree_block *block;
block            3487 fs/btrfs/relocation.c 	block = kmalloc(sizeof(*block), GFP_NOFS);
block            3488 fs/btrfs/relocation.c 	if (!block)
block            3491 fs/btrfs/relocation.c 	block->bytenr = extent_key->objectid;
block            3492 fs/btrfs/relocation.c 	block->key.objectid = rc->extent_root->fs_info->nodesize;
block            3493 fs/btrfs/relocation.c 	block->key.offset = generation;
block            3494 fs/btrfs/relocation.c 	block->level = level;
block            3495 fs/btrfs/relocation.c 	block->key_ready = 0;
block            3497 fs/btrfs/relocation.c 	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
block            3499 fs/btrfs/relocation.c 		backref_tree_panic(rb_node, -EEXIST, block->bytenr);
block            3656 fs/btrfs/relocation.c 	struct tree_block *block;
block            3785 fs/btrfs/relocation.c 			block = kmalloc(sizeof(*block), GFP_NOFS);
block            3786 fs/btrfs/relocation.c 			if (!block) {
block            3790 fs/btrfs/relocation.c 			block->bytenr = leaf->start;
block            3791 fs/btrfs/relocation.c 			btrfs_item_key_to_cpu(leaf, &block->key, 0);
block            3792 fs/btrfs/relocation.c 			block->level = 0;
block            3793 fs/btrfs/relocation.c 			block->key_ready = 1;
block            3794 fs/btrfs/relocation.c 			rb_node = tree_insert(blocks, block->bytenr,
block            3795 fs/btrfs/relocation.c 					      &block->rb_node);
block            3798 fs/btrfs/relocation.c 						   block->bytenr);
block             194 fs/buffer.c    __find_get_block_slow(struct block_device *bdev, sector_t block)
block             206 fs/buffer.c    	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
block             219 fs/buffer.c    		else if (bh->b_blocknr == block) {
block             237 fs/buffer.c    		       (unsigned long long)block,
block             895 fs/buffer.c    			sector_t block, int size)
block             907 fs/buffer.c    			bh->b_blocknr = block;
block             910 fs/buffer.c    			if (block < end_block)
block             913 fs/buffer.c    		block++;
block             929 fs/buffer.c    grow_dev_page(struct block_device *bdev, sector_t block,
block             981 fs/buffer.c    	ret = (block < end_block) ? 1 : -ENXIO;
block             993 fs/buffer.c    grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
block            1003 fs/buffer.c    	index = block >> sizebits;
block            1009 fs/buffer.c    	if (unlikely(index != block >> sizebits)) {
block            1012 fs/buffer.c    			__func__, (unsigned long long)block,
block            1018 fs/buffer.c    	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
block            1022 fs/buffer.c    __getblk_slow(struct block_device *bdev, sector_t block,
block            1041 fs/buffer.c    		bh = __find_get_block(bdev, block, size);
block            1045 fs/buffer.c    		ret = grow_buffers(bdev, block, size, gfp);
block            1255 fs/buffer.c    lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
block            1265 fs/buffer.c    		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
block            1290 fs/buffer.c    __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
block            1292 fs/buffer.c    	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
block            1296 fs/buffer.c    		bh = __find_get_block_slow(bdev, block);
block            1315 fs/buffer.c    __getblk_gfp(struct block_device *bdev, sector_t block,
block            1318 fs/buffer.c    	struct buffer_head *bh = __find_get_block(bdev, block, size);
block            1322 fs/buffer.c    		bh = __getblk_slow(bdev, block, size, gfp);
block            1330 fs/buffer.c    void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
block            1332 fs/buffer.c    	struct buffer_head *bh = __getblk(bdev, block, size);
block            1340 fs/buffer.c    void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
block            1343 fs/buffer.c    	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
block            1364 fs/buffer.c    __bread_gfp(struct block_device *bdev, sector_t block,
block            1367 fs/buffer.c    	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
block            1574 fs/buffer.c    void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
block            1579 fs/buffer.c    	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
block            1585 fs/buffer.c    	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
block            1606 fs/buffer.c    				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
block            1608 fs/buffer.c    				if (bh->b_blocknr >= block + len)
block            1685 fs/buffer.c    	sector_t block;
block            1709 fs/buffer.c    	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
block            1717 fs/buffer.c    		if (block > last_block) {
block            1731 fs/buffer.c    			err = get_block(inode, block, bh, 1);
block            1742 fs/buffer.c    		block++;
block            1887 fs/buffer.c    iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
block            1890 fs/buffer.c    	loff_t offset = block << inode->i_blkbits;
block            1948 fs/buffer.c    	sector_t block;
block            1962 fs/buffer.c    	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
block            1965 fs/buffer.c    	    block++, block_start=block_end, bh = bh->b_this_page) {
block            1979 fs/buffer.c    				err = get_block(inode, block, bh, 1);
block            1983 fs/buffer.c    				iomap_to_bh(inode, block, bh, iomap);
block            2969 fs/buffer.c    sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
block            2977 fs/buffer.c    	get_block(inode, block, &tmp, 0);
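
The fs/buffer.c entries above map a device block number to a page-cache index by shifting away the blocks-per-page factor, and grow_buffers detects block numbers beyond the reachable index range by storing the result and shifting it back. A worked example of both steps; the 32-bit index type is only there to make the truncation visible.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    int main(void)
    {
        uint64_t block = 0x500000000ULL;  /* a very large device block number */
        unsigned int size = 1024;         /* block size in bytes */
        unsigned int sizebits = 0;

        while (((uint64_t)size << sizebits) < PAGE_SIZE)
            sizebits++;                   /* 1024-byte blocks -> 2: four blocks per page */

        uint32_t index = (uint32_t)(block >> sizebits);  /* deliberately narrow index type */

        if ((uint64_t)index != block >> sizebits)
            printf("block %llu lies outside the reachable page cache\n",
                   (unsigned long long)block);
        else
            printf("block %llu maps to page index %u\n",
                   (unsigned long long)block, index);
        return 0;
    }
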
block             399 fs/cachefiles/rdwr.c 	sector_t block0, block;
block             434 fs/cachefiles/rdwr.c 	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
block             437 fs/cachefiles/rdwr.c 	       (unsigned long long) block);
block             439 fs/cachefiles/rdwr.c 	if (block) {
block             731 fs/cachefiles/rdwr.c 		sector_t block0, block;
block             742 fs/cachefiles/rdwr.c 		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
block             746 fs/cachefiles/rdwr.c 		       (unsigned long long) block);
block             748 fs/cachefiles/rdwr.c 		if (block) {
block             525 fs/ecryptfs/mmap.c static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
block             535 fs/ecryptfs/mmap.c 							 block);
block              26 fs/efs/dir.c   	efs_block_t		block;
block              34 fs/efs/dir.c   	block = ctx->pos >> EFS_DIRBSIZE_BITS;
block              40 fs/efs/dir.c   	while (block < inode->i_blocks) {
block              45 fs/efs/dir.c   		bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
block              49 fs/efs/dir.c   			       __func__, block);
block              76 fs/efs/dir.c   				 __func__, block, slot, dirblock->slots-1,
block              81 fs/efs/dir.c   			ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot;
block              99 fs/efs/dir.c   		block++;
block             101 fs/efs/dir.c   	ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot;
block              27 fs/efs/file.c  			__func__, block, inode->i_blocks, inode->i_size);
block              37 fs/efs/file.c  int efs_bmap(struct inode *inode, efs_block_t block) {
block              39 fs/efs/file.c  	if (block < 0) {
block              45 fs/efs/file.c  	if (!(block < inode->i_blocks)) {
block              51 fs/efs/file.c  			__func__, block, inode->i_blocks, inode->i_size);
block              56 fs/efs/file.c  	return efs_map_block(inode, block);
block              21 fs/efs/inode.c static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
block              23 fs/efs/inode.c 	return generic_block_bmap(mapping,block,efs_get_block);
block              56 fs/efs/inode.c 	efs_block_t block, offset;
block              83 fs/efs/inode.c 	block = sb->fs_start + sb->first_block + 
block              91 fs/efs/inode.c 	bh = sb_bread(inode->i_sb, block);
block              93 fs/efs/inode.c 		pr_warn("%s() failed at block %d\n", __func__, block);
block             179 fs/efs/inode.c efs_extent_check(efs_extent *ptr, efs_block_t block, struct efs_sb_info *sb) {
block             192 fs/efs/inode.c 	if ((block >= offset) && (block < offset+length)) {
block             193 fs/efs/inode.c 		return(sb->fs_start + start + block - offset);
block             199 fs/efs/inode.c efs_block_t efs_map_block(struct inode *inode, efs_block_t block) {
block             213 fs/efs/inode.c 		if ((result = efs_extent_check(&in->extents[last], block, sb)))
block             230 fs/efs/inode.c 			if ((result = efs_extent_check(&in->extents[cur], block, sb))) {
block             236 fs/efs/inode.c 		pr_err("%s() failed to map block %u (dir)\n", __func__, block);
block             241 fs/efs/inode.c 		 __func__, block);
block             264 fs/efs/inode.c 			       cur, block);
block             302 fs/efs/inode.c 		if ((result = efs_extent_check(&ext, block, sb))) {
block             309 fs/efs/inode.c 	pr_err("%s() failed to map block %u (indir)\n", __func__, block);
block              25 fs/efs/namei.c 	efs_block_t		block;
block              31 fs/efs/namei.c 	for(block = 0; block < inode->i_blocks; block++) {
block              33 fs/efs/namei.c 		bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
block              36 fs/efs/namei.c 			       __func__, block);
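
efs_map_block above walks extents and uses efs_extent_check to test whether a logical block falls inside one; a hit is translated by its distance into the extent. A self-contained sketch with an invented toy_extent holding made-up values.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_extent {
        uint32_t offset;   /* first logical block covered */
        uint32_t length;   /* number of blocks covered */
        uint32_t start;    /* first physical block, relative to fs start */
    };

    static uint32_t extent_check(const struct toy_extent *ex, uint32_t block,
                                 uint32_t fs_start)
    {
        if (block >= ex->offset && block < ex->offset + ex->length)
            return fs_start + ex->start + block - ex->offset;
        return 0;   /* 0 = not covered by this extent, try the next one */
    }

    int main(void)
    {
        struct toy_extent ex = { .offset = 16, .length = 8, .start = 500 };

        printf("logical 19 -> physical %u\n", extent_check(&ex, 19, 100)); /* 603 */
        printf("logical 30 -> physical %u\n", extent_check(&ex, 30, 100)); /* 0 */
        return 0;
    }
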
block             344 fs/erofs/data.c static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
block             351 fs/erofs/data.c 		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
block             355 fs/erofs/data.c 	return generic_block_bmap(mapping, block, erofs_get_block);
block             478 fs/ext2/balloc.c void ext2_free_blocks (struct inode * inode, unsigned long block,
block             493 fs/ext2/balloc.c 	if (!ext2_data_block_valid(sbi, block, count)) {
block             496 fs/ext2/balloc.c 			    "block = %lu, count = %lu", block, count);
block             500 fs/ext2/balloc.c 	ext2_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
block             504 fs/ext2/balloc.c 	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
block             506 fs/ext2/balloc.c 	bit = (block - le32_to_cpu(es->s_first_data_block)) %
block             525 fs/ext2/balloc.c 	if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
block             526 fs/ext2/balloc.c 	    in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
block             527 fs/ext2/balloc.c 	    in_range (block, le32_to_cpu(desc->bg_inode_table),
block             529 fs/ext2/balloc.c 	    in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
block             534 fs/ext2/balloc.c 			    block, count);
block             542 fs/ext2/balloc.c 				"bit already cleared for block %lu", block + i);
block             556 fs/ext2/balloc.c 		block += count;
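
ext2_free_blocks above converts an absolute block number into a block group and a bit inside that group's bitmap with a subtraction, a divide and a modulo. A worked example; the first-data-block and blocks-per-group values are illustrative, not read from a real superblock.

    #include <stdio.h>

    int main(void)
    {
        unsigned long first_data_block = 1;     /* example value */
        unsigned long blocks_per_group = 8192;  /* example value */
        unsigned long block = 20000;

        unsigned long block_group = (block - first_data_block) / blocks_per_group;
        unsigned long bit         = (block - first_data_block) % blocks_per_group;

        printf("block %lu -> group %lu, bit %lu in that group's bitmap\n",
               block, block_group, bit);        /* group 2, bit 3615 */
        return 0;
    }
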
block             170 fs/ext2/ialloc.c 	unsigned long block;
block             188 fs/ext2/ialloc.c 	block = le32_to_cpu(gdp->bg_inode_table) +
block             190 fs/ext2/ialloc.c 	sb_breadahead(inode->i_sb, block);
block             330 fs/ext2/inode.c static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
block             341 fs/ext2/inode.c 	if (block_i && (block == block_i->last_alloc_logical_block + 1)
block             561 fs/ext2/inode.c 			long block, Indirect *where, int num, int blks)
block             590 fs/ext2/inode.c 		block_i->last_alloc_logical_block = block + blks - 1;
block             933 fs/ext2/inode.c static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
block             935 fs/ext2/inode.c 	return generic_block_bmap(mapping,block,ext2_get_block);
block            1333 fs/ext2/inode.c 	unsigned long block;
block            1350 fs/ext2/inode.c 	block = le32_to_cpu(gdp->bg_inode_table) +
block            1352 fs/ext2/inode.c 	if (!(bh = sb_bread(sb, block)))
block            1366 fs/ext2/inode.c 		   (unsigned long) ino, block);
block             830 fs/ext2/super.c 	unsigned long block;
block            1106 fs/ext2/super.c 		block = descriptor_loc(sb, logic_sb_block, i);
block            1107 fs/ext2/super.c 		sbi->s_group_desc[i] = sb_bread(sb, block);
block             693 fs/ext2/xattr.c 			int block = ext2_new_block(inode, goal, &error);
block             696 fs/ext2/xattr.c 			ea_idebug(inode, "creating block %d", block);
block             698 fs/ext2/xattr.c 			new_bh = sb_getblk(sb, block);
block             700 fs/ext2/xattr.c 				ext2_free_blocks(inode, block, 1);
block              36 fs/ext4/balloc.c 				   ext4_fsblk_t block)
block              41 fs/ext4/balloc.c 		group = (block -
block              45 fs/ext4/balloc.c 		ext4_get_group_no_and_offset(sb, block, &group, NULL);
block              74 fs/ext4/balloc.c 				      ext4_fsblk_t block,
block              79 fs/ext4/balloc.c 	actual_group = ext4_get_group_number(sb, block);
block            2002 fs/ext4/ext4.h #define EXT4_DIRENT_TAIL(block, blocksize) \
block            2003 fs/ext4/ext4.h 	((struct ext4_dir_entry_tail *)(((void *)(block)) + \
block            2332 fs/ext4/ext4.h 					  ext4_fsblk_t block);
block            2565 fs/ext4/ext4.h 			     struct buffer_head *bh, ext4_fsblk_t block,
block            2572 fs/ext4/ext4.h 				ext4_fsblk_t block, unsigned long count);
block            2580 fs/ext4/ext4.h int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
block            2699 fs/ext4/ext4.h 					 sector_t block, int op_flags);
block            2746 fs/ext4/ext4.h #define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...)			\
block            2747 fs/ext4/ext4.h 	ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
block            2749 fs/ext4/ext4.h #define EXT4_ERROR_FILE(file, block, fmt, a...)				\
block            2750 fs/ext4/ext4.h 	ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
block            2754 fs/ext4/ext4.h #define ext4_error_inode(inode, func, line, block, fmt, ...)		\
block            2755 fs/ext4/ext4.h 	__ext4_error_inode(inode, func, line, block, fmt, ##__VA_ARGS__)
block            2756 fs/ext4/ext4.h #define ext4_error_file(file, func, line, block, fmt, ...)		\
block            2757 fs/ext4/ext4.h 	__ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__)
block            2770 fs/ext4/ext4.h #define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...)		\
block            2771 fs/ext4/ext4.h 	__ext4_grp_locked_error(__func__, __LINE__, sb, grp, ino, block, \
block            2776 fs/ext4/ext4.h #define ext4_error_inode(inode, func, line, block, fmt, ...)		\
block            2779 fs/ext4/ext4.h 	__ext4_error_inode(inode, "", 0, block, " ");			\
block            2781 fs/ext4/ext4.h #define ext4_error_file(file, func, line, block, fmt, ...)		\
block            2784 fs/ext4/ext4.h 	__ext4_error_file(file, "", 0, block, " ");			\
block            2813 fs/ext4/ext4.h #define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...)		\
block            2816 fs/ext4/ext4.h 	__ext4_grp_locked_error("", 0, sb, grp, ino, block, " ");	\
block            3170 fs/ext4/ext4.h 				  struct inode *dir, ext4_lblk_t block,
block             224 fs/ext4/ext4_extents.h 	ext4_fsblk_t block;
block             226 fs/ext4/ext4_extents.h 	block = le32_to_cpu(ex->ee_start_lo);
block             227 fs/ext4/ext4_extents.h 	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
block             228 fs/ext4/ext4_extents.h 	return block;
block             237 fs/ext4/ext4_extents.h 	ext4_fsblk_t block;
block             239 fs/ext4/ext4_extents.h 	block = le32_to_cpu(ix->ei_leaf_lo);
block             240 fs/ext4/ext4_extents.h 	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
block             241 fs/ext4/ext4_extents.h 	return block;
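The ext4_ext_pblock()/ext4_idx_pblock() lines above rebuild a 48-bit physical block number from a 32-bit low word and a 16-bit high word. A small sketch of that bit assembly using invented values; the two-step shift mirrors the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Combine the lo/hi halves the way the lines above do: low 32 bits from
 * ee_start_lo, upper 16 bits from ee_start_hi. */
static uint64_t assemble_pblock(uint32_t lo, uint16_t hi)
{
	uint64_t block = lo;

	block |= ((uint64_t)hi << 31) << 1;   /* same effect as << 32 */
	return block;
}

int main(void)
{
	uint64_t pblk = assemble_pblock(0x89abcdefu, 0x0123u);

	printf("pblock = 0x%llx\n", (unsigned long long)pblk);
	return 0;
}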
block             172 fs/ext4/extents.c 			      ext4_lblk_t block)
block             200 fs/ext4/extents.c 			if (block > ext_block)
block             201 fs/ext4/extents.c 				return ext_pblk + (block - ext_block);
block             203 fs/ext4/extents.c 				return ext_pblk - (ext_block - block);
block             369 fs/ext4/extents.c 	ext4_fsblk_t block = ext4_ext_pblock(ext);
block             380 fs/ext4/extents.c 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
block             386 fs/ext4/extents.c 	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
block             388 fs/ext4/extents.c 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
block             741 fs/ext4/extents.c 			struct ext4_ext_path *path, ext4_lblk_t block)
block             747 fs/ext4/extents.c 	ext_debug("binsearch for %u(idx):  ", block);
block             753 fs/ext4/extents.c 		if (block < le32_to_cpu(m->ei_block))
block             784 fs/ext4/extents.c 			if (block < le32_to_cpu(ix->ei_block))
block             801 fs/ext4/extents.c 		struct ext4_ext_path *path, ext4_lblk_t block)
block             814 fs/ext4/extents.c 	ext_debug("binsearch for %u:  ", block);
block             821 fs/ext4/extents.c 		if (block < le32_to_cpu(m->ee_block))
block             846 fs/ext4/extents.c 			if (block < le32_to_cpu(ex->ee_block))
block             870 fs/ext4/extents.c ext4_find_extent(struct inode *inode, ext4_lblk_t block,
block             914 fs/ext4/extents.c 		ext4_ext_binsearch_idx(inode, path + ppos, block);
block             937 fs/ext4/extents.c 	ext4_ext_binsearch(inode, path + ppos, block);
block            1517 fs/ext4/extents.c 	ext4_fsblk_t block;
block            1584 fs/ext4/extents.c 	block = ext4_idx_pblock(ix);
block            1587 fs/ext4/extents.c 		bh = read_extent_tree_block(inode, block,
block            1593 fs/ext4/extents.c 		block = ext4_idx_pblock(ix);
block            1597 fs/ext4/extents.c 	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
block            2177 fs/ext4/extents.c 				    ext4_lblk_t block, ext4_lblk_t num,
block            2184 fs/ext4/extents.c 	ext4_lblk_t last = block + num;
block            2189 fs/ext4/extents.c 	while (block < last && block != EXT_MAX_BLOCKS) {
block            2190 fs/ext4/extents.c 		num = last - block;
block            2194 fs/ext4/extents.c 		path = ext4_find_extent(inode, block, &path, 0);
block            2217 fs/ext4/extents.c 			start = block;
block            2218 fs/ext4/extents.c 			end = block + num;
block            2219 fs/ext4/extents.c 		} else if (le32_to_cpu(ex->ee_block) > block) {
block            2221 fs/ext4/extents.c 			start = block;
block            2223 fs/ext4/extents.c 			if (block + num < end)
block            2224 fs/ext4/extents.c 				end = block + num;
block            2225 fs/ext4/extents.c 		} else if (block >= le32_to_cpu(ex->ee_block)
block            2228 fs/ext4/extents.c 			start = block;
block            2229 fs/ext4/extents.c 			end = block + num;
block            2232 fs/ext4/extents.c 		} else if (block >= le32_to_cpu(ex->ee_block)) {
block            2237 fs/ext4/extents.c 			start = block;
block            2240 fs/ext4/extents.c 			if (block + num < end)
block            2241 fs/ext4/extents.c 				end = block + num;
block            2317 fs/ext4/extents.c 		block = es.es_lblk + es.es_len;
block            2326 fs/ext4/extents.c 				   ext4_lblk_t block, ext4_lblk_t num,
block            2329 fs/ext4/extents.c 	ext4_lblk_t next, end = block + num - 1;
block            2335 fs/ext4/extents.c 	while (block <= end) {
block            2338 fs/ext4/extents.c 		if (!ext4_es_lookup_extent(inode, block, &next, &es))
block            2361 fs/ext4/extents.c 		block = next;
block            5037 fs/ext4/extents.c 	ext4_lblk_t block, next_del;
block            5063 fs/ext4/extents.c 	block = newes->es_lblk + newes->es_len;
block            5064 fs/ext4/extents.c 	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, block,
block            1293 fs/ext4/extents_status.c 	ext4_fsblk_t block;
block            1328 fs/ext4/extents_status.c 			block = 0x7FDEADBEEFULL;
block            1331 fs/ext4/extents_status.c 				block = ext4_es_pblock(&orig_es) +
block            1333 fs/ext4/extents_status.c 			ext4_es_store_pblock_status(&newes, block,
block            1350 fs/ext4/extents_status.c 				block = orig_es.es_pblk + orig_es.es_len - len2;
block            1351 fs/ext4/extents_status.c 				ext4_es_store_pblock(es, block);
block            1394 fs/ext4/extents_status.c 			block = es->es_pblk + orig_len - len1;
block            1395 fs/ext4/extents_status.c 			ext4_es_store_pblock(es, block);
block             215 fs/ext4/extents_status.h 	ext4_fsblk_t block;
block             217 fs/ext4/extents_status.h 	block = (pb & ~ES_MASK) | (es->es_pblk & ES_MASK);
block             218 fs/ext4/extents_status.h 	es->es_pblk = block;
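The extents_status.h lines above keep the status-flag bits of es_pblk and overwrite only the block-number bits. A sketch of that masking; the 58-bit split used here is an assumption for illustration, not necessarily the kernel's exact layout:

#include <stdint.h>
#include <stdio.h>

#define ES_SHIFT 58                              /* assumed flag position */
#define ES_MASK  (~((1ULL << ES_SHIFT) - 1))     /* high bits hold flags  */

int main(void)
{
	uint64_t es_pblk = (3ULL << ES_SHIFT) | 1000;  /* flags + old block  */
	uint64_t pb      = 2048;                       /* new physical block */

	/* Same pattern as the lines above: replace the block bits, keep
	 * whatever flags were already stored in the masked-off bits. */
	es_pblk = (pb & ~ES_MASK) | (es_pblk & ES_MASK);

	printf("flags %llu, block %llu\n",
	       (unsigned long long)(es_pblk >> ES_SHIFT),
	       (unsigned long long)(es_pblk & ~ES_MASK));
	return 0;
}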
block             244 fs/ext4/indirect.c static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
block            1328 fs/ext4/inline.c 			   struct inode *dir, ext4_lblk_t block,
block             972 fs/ext4/inode.c 				ext4_lblk_t block, int map_flags)
block             981 fs/ext4/inode.c 	map.m_lblk = block;
block            1029 fs/ext4/inode.c 			       ext4_lblk_t block, int map_flags)
block            1033 fs/ext4/inode.c 	bh = ext4_getblk(handle, inode, block, map_flags);
block            1047 fs/ext4/inode.c int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
block            1053 fs/ext4/inode.c 		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
block            1178 fs/ext4/inode.c 	sector_t block;
block            1195 fs/ext4/inode.c 	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
block            1198 fs/ext4/inode.c 	    block++, block_start = block_end, bh = bh->b_this_page) {
block            1211 fs/ext4/inode.c 			err = get_block(inode, block, bh, 1);
block            3269 fs/ext4/inode.c static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
block            3321 fs/ext4/inode.c 	return generic_block_bmap(mapping, block, ext4_get_block);
block            4574 fs/ext4/inode.c 	ext4_fsblk_t		block;
block            4594 fs/ext4/inode.c 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
block            4597 fs/ext4/inode.c 	bh = sb_getblk(sb, block);
block            4672 fs/ext4/inode.c 			b = block & ~((ext4_fsblk_t) ra_blks - 1);
block            4698 fs/ext4/inode.c 			EXT4_ERROR_INODE_BLOCK(inode, block,
block            4839 fs/ext4/inode.c 	int block;
block            4998 fs/ext4/inode.c 	for (block = 0; block < EXT4_N_BLOCKS; block++)
block            4999 fs/ext4/inode.c 		ei->i_data[block] = raw_inode->i_block[block];
block            5256 fs/ext4/inode.c 	int err = 0, rc, block;
block            5336 fs/ext4/inode.c 		for (block = 0; block < EXT4_N_BLOCKS; block++)
block            5337 fs/ext4/inode.c 			raw_inode->i_block[block] = ei->i_data[block];
block             976 fs/ext4/mballoc.c 	int block, pnum, poff;
block             989 fs/ext4/mballoc.c 	block = group * 2;
block             990 fs/ext4/mballoc.c 	pnum = block / blocks_per_page;
block             991 fs/ext4/mballoc.c 	poff = block % blocks_per_page;
block            1004 fs/ext4/mballoc.c 	block++;
block            1005 fs/ext4/mballoc.c 	pnum = block / blocks_per_page;
block            1103 fs/ext4/mballoc.c 	int block;
block            1140 fs/ext4/mballoc.c 	block = group * 2;
block            1141 fs/ext4/mballoc.c 	pnum = block / blocks_per_page;
block            1142 fs/ext4/mballoc.c 	poff = block % blocks_per_page;
block            1186 fs/ext4/mballoc.c 	block++;
block            1187 fs/ext4/mballoc.c 	pnum = block / blocks_per_page;
block            1188 fs/ext4/mballoc.c 	poff = block % blocks_per_page;
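The mballoc lines above map each block group to two consecutive blocks in the buddy-cache inode (bitmap first, buddy second) and then to a page number plus offset within that page. A sketch of that index arithmetic, assuming a 4 KiB page and 4 KiB block size purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 4096, pagesize = 4096;
	unsigned int blocks_per_page = pagesize / blocksize;
	unsigned int group = 7;

	/* Bitmap block for the group, then the buddy block right after it,
	 * following the group*2 / group*2+1 layout shown above. */
	unsigned int block = group * 2;
	printf("bitmap: page %u offset %u\n",
	       block / blocks_per_page, block % blocks_per_page);

	block++;
	printf("buddy:  page %u offset %u\n",
	       block / blocks_per_page, block % blocks_per_page);
	return 0;
}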
block            1253 fs/ext4/mballoc.c static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
block            1260 fs/ext4/mballoc.c 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
block            1264 fs/ext4/mballoc.c 		block = block >> 1;
block            1265 fs/ext4/mballoc.c 		if (!mb_test_bit(block, bb)) {
block            1418 fs/ext4/mballoc.c 	int block;
block            1442 fs/ext4/mballoc.c 	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
block            1446 fs/ext4/mballoc.c 	if (unlikely(block != -1)) {
block            1451 fs/ext4/mballoc.c 		blocknr += EXT4_C2B(sbi, block);
block            1457 fs/ext4/mballoc.c 				      block);
block            1493 fs/ext4/mballoc.c static int mb_find_extent(struct ext4_buddy *e4b, int block,
block            1496 fs/ext4/mballoc.c 	int next = block;
block            1505 fs/ext4/mballoc.c 	BUG_ON(block >= max);
block            1506 fs/ext4/mballoc.c 	if (mb_test_bit(block, buddy)) {
block            1514 fs/ext4/mballoc.c 	order = mb_find_order_for_block(e4b, block);
block            1515 fs/ext4/mballoc.c 	block = block >> order;
block            1518 fs/ext4/mballoc.c 	ex->fe_start = block << order;
block            1529 fs/ext4/mballoc.c 		if (block + 1 >= max)
block            1532 fs/ext4/mballoc.c 		next = (block + 1) * (1 << order);
block            1538 fs/ext4/mballoc.c 		block = next >> order;
block            1547 fs/ext4/mballoc.c 			   block, order, needed, ex->fe_group, ex->fe_start,
block            2951 fs/ext4/mballoc.c 	ext4_fsblk_t block;
block            2985 fs/ext4/mballoc.c 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
block            2988 fs/ext4/mballoc.c 	if (!ext4_data_block_valid(sbi, block, len)) {
block            2990 fs/ext4/mballoc.c 			   "fs metadata", block, block+len);
block            4245 fs/ext4/mballoc.c 	ext4_grpblk_t block;
block            4259 fs/ext4/mballoc.c 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
block            4268 fs/ext4/mballoc.c 	ac->ac_o_ex.fe_start = block;
block            4491 fs/ext4/mballoc.c 	ext4_fsblk_t block = 0;
block            4582 fs/ext4/mballoc.c 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
block            4611 fs/ext4/mballoc.c 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
block            4613 fs/ext4/mballoc.c 	return block;
block            4726 fs/ext4/mballoc.c 		      struct buffer_head *bh, ext4_fsblk_t block,
block            4744 fs/ext4/mballoc.c 		if (block)
block            4745 fs/ext4/mballoc.c 			BUG_ON(block != bh->b_blocknr);
block            4747 fs/ext4/mballoc.c 			block = bh->b_blocknr;
block            4752 fs/ext4/mballoc.c 	    !ext4_data_block_valid(sbi, block, count)) {
block            4754 fs/ext4/mballoc.c 			   "block = %llu, count = %lu", block, count);
block            4758 fs/ext4/mballoc.c 	ext4_debug("freeing block %llu\n", block);
block            4759 fs/ext4/mballoc.c 	trace_ext4_free_blocks(inode, block, count, flags);
block            4765 fs/ext4/mballoc.c 			    inode, bh, block);
block            4775 fs/ext4/mballoc.c 	overflow = EXT4_PBLK_COFF(sbi, block);
block            4779 fs/ext4/mballoc.c 			block += overflow;
block            4785 fs/ext4/mballoc.c 			block -= overflow;
block            4807 fs/ext4/mballoc.c 				bh = sb_find_get_block(inode->i_sb, block + i);
block            4808 fs/ext4/mballoc.c 			ext4_forget(handle, is_metadata, inode, bh, block + i);
block            4814 fs/ext4/mballoc.c 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
block            4842 fs/ext4/mballoc.c 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
block            4843 fs/ext4/mballoc.c 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
block            4844 fs/ext4/mballoc.c 	    in_range(block, ext4_inode_table(sb, gdp),
block            4846 fs/ext4/mballoc.c 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
block            4850 fs/ext4/mballoc.c 			   "Block = %llu, count = %lu", block, count);
block            4967 fs/ext4/mballoc.c 		block += count;
block            4988 fs/ext4/mballoc.c 			 ext4_fsblk_t block, unsigned long count)
block            5000 fs/ext4/mballoc.c 	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
block            5001 fs/ext4/mballoc.c 	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
block            5004 fs/ext4/mballoc.c 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
block            5009 fs/ext4/mballoc.c 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
block            5034 fs/ext4/mballoc.c 	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
block            5035 fs/ext4/mballoc.c 	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
block            5036 fs/ext4/mballoc.c 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
block            5037 fs/ext4/mballoc.c 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
block            5041 fs/ext4/mballoc.c 			   block, count);
block            5065 fs/ext4/mballoc.c 				   (ext4_fsblk_t)(block + i));
block             380 fs/ext4/migrate.c 	ext4_fsblk_t block;
block             384 fs/ext4/migrate.c 	block = ext4_idx_pblock(ix);
block             385 fs/ext4/migrate.c 	bh = ext4_sb_bread(inode->i_sb, block, 0);
block             400 fs/ext4/migrate.c 	ext4_free_blocks(handle, inode, NULL, block, 1,
block             170 fs/ext4/move_extent.c 	sector_t block;
block             185 fs/ext4/move_extent.c 	block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
block             187 fs/ext4/move_extent.c 	     block++, block_start = block_end, bh = bh->b_this_page) {
block             197 fs/ext4/move_extent.c 			err = ext4_get_block(inode, block, bh, 0);
block              55 fs/ext4/namei.c 					ext4_lblk_t *block)
block              65 fs/ext4/namei.c 	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
block              67 fs/ext4/namei.c 	bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
block              99 fs/ext4/namei.c #define ext4_read_dirblock(inode, block, type) \
block             100 fs/ext4/namei.c 	__ext4_read_dirblock((inode), (block), (type), __func__, __LINE__)
block             103 fs/ext4/namei.c 						ext4_lblk_t block,
block             112 fs/ext4/namei.c 	bh = ext4_bread(NULL, inode, block, 0);
block             117 fs/ext4/namei.c 			       inode->i_ino, (unsigned long)block,
block             123 fs/ext4/namei.c 		ext4_error_inode(inode, func, line, block,
block             133 fs/ext4/namei.c 		if (block == 0)
block             141 fs/ext4/namei.c 		ext4_error_inode(inode, func, line, block,
block             159 fs/ext4/namei.c 			ext4_error_inode(inode, func, line, block,
block             169 fs/ext4/namei.c 			ext4_error_inode(inode, func, line, block,
block             205 fs/ext4/namei.c 	__le32 block;
block             284 fs/ext4/namei.c 					u32 hash, ext4_lblk_t block);
block             534 fs/ext4/namei.c 	return le32_to_cpu(entry->block) & 0x0fffffff;
block             539 fs/ext4/namei.c 	entry->block = cpu_to_le32(value);
block             715 fs/ext4/namei.c 		ext4_lblk_t block = dx_get_block(entries);
block             719 fs/ext4/namei.c 		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
block             720 fs/ext4/namei.c 		bh = ext4_bread(NULL,dir, block, 0);
block             986 fs/ext4/namei.c 				  struct inode *dir, ext4_lblk_t block,
block             996 fs/ext4/namei.c 							(unsigned long)block));
block             997 fs/ext4/namei.c 	bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
block            1024 fs/ext4/namei.c 				(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
block            1090 fs/ext4/namei.c 	ext4_lblk_t block;
block            1156 fs/ext4/namei.c 		block = dx_get_block(frame->at);
block            1157 fs/ext4/namei.c 		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
block            1259 fs/ext4/namei.c static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
block            1269 fs/ext4/namei.c 	dx_set_block(new, block);
block            1412 fs/ext4/namei.c static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
block            1419 fs/ext4/namei.c 	if (block == 0)
block            1447 fs/ext4/namei.c 	ext4_lblk_t start, block;
block            1479 fs/ext4/namei.c 		block = start = 0;
block            1504 fs/ext4/namei.c 	block = start;
block            1514 fs/ext4/namei.c 			if (block < start)
block            1515 fs/ext4/namei.c 				ra_max = start - block;
block            1517 fs/ext4/namei.c 				ra_max = nblocks - block;
block            1519 fs/ext4/namei.c 			retval = ext4_bread_batch(dir, block, ra_max,
block            1532 fs/ext4/namei.c 					 (unsigned long) block);
block            1538 fs/ext4/namei.c 		    !is_dx_internal_node(dir, block,
block            1542 fs/ext4/namei.c 					 "block %lu", (unsigned long)block);
block            1549 fs/ext4/namei.c 			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
block            1551 fs/ext4/namei.c 			EXT4_I(dir)->i_dir_start_lookup = block;
block            1560 fs/ext4/namei.c 		if (++block >= nblocks)
block            1561 fs/ext4/namei.c 			block = 0;
block            1562 fs/ext4/namei.c 	} while (block != start);
block            1568 fs/ext4/namei.c 	block = nblocks;
block            1570 fs/ext4/namei.c 	if (block < nblocks) {
block            1630 fs/ext4/namei.c 	ext4_lblk_t block;
block            1640 fs/ext4/namei.c 		block = dx_get_block(frame->at);
block            1641 fs/ext4/namei.c 		bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
block            1646 fs/ext4/namei.c 					 block << EXT4_BLOCK_SIZE_BITS(sb),
block            2050 fs/ext4/namei.c 	ext4_lblk_t  block;
block            2080 fs/ext4/namei.c 	bh2 = ext4_append(handle, dir, &block);
block            2173 fs/ext4/namei.c 	ext4_lblk_t block, blocks;
block            2221 fs/ext4/namei.c 	for (block = 0; block < blocks; block++) {
block            2222 fs/ext4/namei.c 		bh = ext4_read_dirblock(dir, block, DIRENT);
block            2224 fs/ext4/namei.c 			bh = ext4_bread(handle, dir, block,
block            2247 fs/ext4/namei.c 	bh = ext4_append(handle, dir, &block);
block            2719 fs/ext4/namei.c 	ext4_lblk_t block = 0;
block            2736 fs/ext4/namei.c 	dir_block = ext4_append(handle, inode, &block);
block             527 fs/ext4/resize.c 	ext4_fsblk_t block;
block             568 fs/ext4/resize.c 		block = start + ext4_bg_has_super(sb, group);
block             570 fs/ext4/resize.c 		for (j = 0; j < gdblocks; j++, block++) {
block             573 fs/ext4/resize.c 			ext4_debug("update backup group %#04llx\n", block);
block             578 fs/ext4/resize.c 			gdb = sb_getblk(sb, block);
block             618 fs/ext4/resize.c 		block = group_data[i].inode_table;
block             620 fs/ext4/resize.c 			   block, sbi->s_itb_per_group);
block             621 fs/ext4/resize.c 		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
block             631 fs/ext4/resize.c 		block = group_data[i].block_bitmap;
block             636 fs/ext4/resize.c 		bh = bclean(handle, sb, block);
block             660 fs/ext4/resize.c 		block = group_data[i].inode_bitmap;
block             665 fs/ext4/resize.c 		bh = bclean(handle, sb, block);
block             683 fs/ext4/resize.c 		block = start;
block             685 fs/ext4/resize.c 			block += group_table_count[j];
block             686 fs/ext4/resize.c 			if (block == (&group_data[i].block_bitmap)[j]) {
block             700 fs/ext4/resize.c 			block = start;
block            1262 fs/ext4/resize.c static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
block            1264 fs/ext4/resize.c 	struct buffer_head *bh = sb_getblk(sb, block);
block             151 fs/ext4/super.c ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
block             153 fs/ext4/super.c 	struct buffer_head *bh = sb_getblk(sb, block);
block             521 fs/ext4/super.c 			unsigned int line, ext4_fsblk_t block,
block             533 fs/ext4/super.c 	es->s_last_error_block = cpu_to_le64(block);
block             538 fs/ext4/super.c 		if (block)
block             542 fs/ext4/super.c 			       block, current->comm, &vaf);
block             555 fs/ext4/super.c 		       unsigned int line, ext4_fsblk_t block,
block             577 fs/ext4/super.c 		if (block)
block             582 fs/ext4/super.c 			       block, current->comm, path, &vaf);
block             768 fs/ext4/super.c 			     unsigned long ino, ext4_fsblk_t block,
block             782 fs/ext4/super.c 	es->s_last_error_block = cpu_to_le64(block);
block             793 fs/ext4/super.c 		if (block)
block             795 fs/ext4/super.c 			       (unsigned long long) block);
block            3620 fs/ext4/super.c 	ext4_fsblk_t block;
block            4287 fs/ext4/super.c 		block = descriptor_loc(sb, logical_sb_block, i);
block            4288 fs/ext4/super.c 		sb_breadahead_unmovable(sb, block);
block            4294 fs/ext4/super.c 		block = descriptor_loc(sb, logical_sb_block, i);
block            4295 fs/ext4/super.c 		bh = sb_bread_unmovable(sb, block);
block            4570 fs/ext4/super.c 	block = ext4_count_free_clusters(sb);
block            4572 fs/ext4/super.c 				   EXT4_C2B(sbi, block));
block            4574 fs/ext4/super.c 	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
block            1346 fs/ext4/xattr.c 	unsigned long block = 0;
block            1356 fs/ext4/xattr.c 		map.m_lblk = block += ret;
block            1375 fs/ext4/xattr.c 	block = 0;
block            1381 fs/ext4/xattr.c 		bh = ext4_getblk(handle, ea_inode, block, 0);
block            1400 fs/ext4/xattr.c 		block += 1;
block            2057 fs/ext4/xattr.c 			ext4_fsblk_t goal, block;
block            2068 fs/ext4/xattr.c 			block = ext4_new_meta_blocks(handle, inode, goal, 0,
block            2074 fs/ext4/xattr.c 				BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
block            2077 fs/ext4/xattr.c 				  (unsigned long long)block);
block            2079 fs/ext4/xattr.c 			new_bh = sb_getblk(sb, block);
block            2083 fs/ext4/xattr.c 				ext4_free_blocks(handle, inode, NULL, block, 1,
block            2961 fs/f2fs/data.c static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
block            2972 fs/f2fs/data.c 	return generic_block_bmap(mapping, block, get_data_block_bmap);
block            3079 fs/f2fs/data.c 			sector_t block;
block            3081 fs/f2fs/data.c 			block = bmap(inode, probe_block + block_in_page);
block            3082 fs/f2fs/data.c 			if (block == 0)
block            3084 fs/f2fs/data.c 			if (block != first_block + block_in_page) {
block             610 fs/f2fs/dir.c  	unsigned long bidx, block;
block             648 fs/f2fs/dir.c  	for (block = bidx; block <= (bidx + nblock - 1); block++) {
block             649 fs/f2fs/dir.c  		dentry_page = f2fs_get_new_data_page(dir, NULL, block, true);
block             642 fs/f2fs/node.c static int get_node_path(struct inode *inode, long block,
block             655 fs/f2fs/node.c 	if (block < direct_index) {
block             656 fs/f2fs/node.c 		offset[n] = block;
block             659 fs/f2fs/node.c 	block -= direct_index;
block             660 fs/f2fs/node.c 	if (block < direct_blks) {
block             663 fs/f2fs/node.c 		offset[n] = block;
block             667 fs/f2fs/node.c 	block -= direct_blks;
block             668 fs/f2fs/node.c 	if (block < direct_blks) {
block             671 fs/f2fs/node.c 		offset[n] = block;
block             675 fs/f2fs/node.c 	block -= direct_blks;
block             676 fs/f2fs/node.c 	if (block < indirect_blks) {
block             679 fs/f2fs/node.c 		offset[n++] = block / direct_blks;
block             681 fs/f2fs/node.c 		offset[n] = block % direct_blks;
block             685 fs/f2fs/node.c 	block -= indirect_blks;
block             686 fs/f2fs/node.c 	if (block < indirect_blks) {
block             689 fs/f2fs/node.c 		offset[n++] = block / direct_blks;
block             691 fs/f2fs/node.c 		offset[n] = block % direct_blks;
block             695 fs/f2fs/node.c 	block -= indirect_blks;
block             696 fs/f2fs/node.c 	if (block < dindirect_blks) {
block             699 fs/f2fs/node.c 		offset[n++] = block / indirect_blks;
block             702 fs/f2fs/node.c 		offset[n++] = (block / direct_blks) % dptrs_per_blk;
block             706 fs/f2fs/node.c 		offset[n] = block % direct_blks;
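The get_node_path() lines above peel a file block number through the direct, indirect and double-indirect ranges, recording an offset at each level. A simplified user-space sketch of that subtract-and-test ladder, with invented per-level capacities (f2fs derives the real ones from its node layout) and without the double-indirect case:

#include <stdio.h>

int main(void)
{
	/* Invented per-level capacities, for illustration only. */
	const long direct_index = 10, direct_blks = 100;
	const long indirect_blks = 100 * 100;
	long block = 5432;

	if (block < direct_index) {
		printf("inode slot %ld\n", block);
	} else if ((block -= direct_index) < direct_blks) {
		printf("first direct node, slot %ld\n", block);
	} else if ((block -= direct_blks) < direct_blks) {
		printf("second direct node, slot %ld\n", block);
	} else if ((block -= direct_blks) < indirect_blks) {
		printf("indirect node %ld, slot %ld\n",
		       block / direct_blks, block % direct_blks);
	} else {
		printf("outside the ranges modelled in this sketch\n");
	}
	return 0;
}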
block            2944 fs/f2fs/super.c 	int block;
block            2953 fs/f2fs/super.c 	for (block = 0; block < 2; block++) {
block            2954 fs/f2fs/super.c 		bh = sb_bread(sb, block);
block            2957 fs/f2fs/super.c 				 block + 1);
block            2966 fs/f2fs/super.c 				 block + 1);
block            2974 fs/f2fs/super.c 			*valid_super_block = block;
block             320 fs/fat/inode.c static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
block             326 fs/fat/inode.c 	blocknr = generic_block_bmap(mapping, block, fat_get_block_bmap);
block             131 fs/freevxfs/vxfs_bmap.c vxfs_bmap_indir(struct inode *ip, long indir, int size, long block)
block             151 fs/freevxfs/vxfs_bmap.c 		if (block < off) {
block             162 fs/freevxfs/vxfs_bmap.c 					block - off);
block             167 fs/freevxfs/vxfs_bmap.c 			if ((block - off) >= fs32_to_cpu(sbi, typ->vt_size))
block             169 fs/freevxfs/vxfs_bmap.c 			pblock = fs32_to_cpu(sbi, typ->vt_block) + block - off;
block             168 fs/freevxfs/vxfs_inode.c 	u_long				block, offset;
block             175 fs/freevxfs/vxfs_inode.c 	block = extent + ((ino * VXFS_ISIZE) / sbp->s_blocksize);
block             177 fs/freevxfs/vxfs_inode.c 	bp = sb_bread(sbp, block);
block             193 fs/freevxfs/vxfs_inode.c 	printk(KERN_WARNING "vxfs: unable to read block %ld\n", block);
block              57 fs/freevxfs/vxfs_olt.c vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize)
block              60 fs/freevxfs/vxfs_olt.c 	return (block * (sbp->s_blocksize / bsize));
block             102 fs/freevxfs/vxfs_subr.c vxfs_bread(struct inode *ip, int block)
block             107 fs/freevxfs/vxfs_subr.c 	pblock = vxfs_bmap1(ip, block);
block             180 fs/freevxfs/vxfs_subr.c vxfs_bmap(struct address_space *mapping, sector_t block)
block             182 fs/freevxfs/vxfs_subr.c 	return generic_block_bmap(mapping, block, vxfs_getblk);
block            2470 fs/fuse/file.c static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
block            2483 fs/fuse/file.c 	inarg.block = block;
block            2497 fs/fuse/file.c 	return err ? 0 : outarg.block;
block              56 fs/gfs2/bmap.c 			       u64 block, struct page *page)
block              90 fs/gfs2/bmap.c 		map_bh(bh, inode->i_sb, block);
block             123 fs/gfs2/bmap.c 	u64 block = 0;
block             138 fs/gfs2/bmap.c 		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
block             142 fs/gfs2/bmap.c 			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
block             143 fs/gfs2/bmap.c 			error = gfs2_dir_get_new_buffer(ip, block, &bh);
block             150 fs/gfs2/bmap.c 			error = gfs2_unstuffer_page(ip, dibh, block, page);
block             163 fs/gfs2/bmap.c 		*(__be64 *)(di + 1) = cpu_to_be64(block);
block             238 fs/gfs2/bmap.c static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
block             245 fs/gfs2/bmap.c 		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
block             395 fs/gfs2/bmap.c 	sector_t factor = 1, block = 0;
block             400 fs/gfs2/bmap.c 			block += mp->mp_list[hgt] * factor;
block             403 fs/gfs2/bmap.c 	return block;
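The gfs2 bmap.c lines above decompose a relative block number into per-height indirect-pointer indices (do_div() by sd_inptrs at each level) and rebuild it by multiplying the indices back out. A user-space sketch of that round trip, with an invented pointers-per-indirect-block count and a single radix at every level:

#include <stdio.h>

#define HEIGHT 3
#define INPTRS 509ULL   /* invented pointers-per-indirect-block count */

int main(void)
{
	unsigned long long block = 1234567, rest, factor = 1, back = 0;
	unsigned long long list[HEIGHT];
	int i;

	/* Decompose bottom-up, like the do_div() loop shown above. */
	rest = block;
	for (i = HEIGHT - 1; i >= 0; i--) {
		list[i] = rest % INPTRS;
		rest /= INPTRS;
	}

	/* Recompose, mirroring the factor walk in the companion lines. */
	for (i = HEIGHT - 1; i >= 0; i--) {
		back += list[i] * factor;
		factor *= INPTRS;
	}

	printf("block %llu -> [%llu, %llu, %llu] -> %llu\n",
	       block, list[0], list[1], list[2], back);
	return 0;
}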
block              92 fs/gfs2/dir.c  int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
block              97 fs/gfs2/dir.c  	bh = gfs2_meta_new(ip->i_gl, block);
block             105 fs/gfs2/dir.c  static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block,
block             111 fs/gfs2/dir.c  	error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, 0, &bh);
block              49 fs/gfs2/dir.h  extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
block             145 fs/gfs2/lops.c 	u64 block;
block             149 fs/gfs2/lops.c 			block = je->dblock + lbn - je->lblock;
block             151 fs/gfs2/lops.c 			return block;
block             503 fs/gfs2/lops.c 	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
block             523 fs/gfs2/lops.c 		for (; block < je->lblock + je->blocks; block++, dblock++) {
block             526 fs/gfs2/lops.c 						block >> shift, GFP_NOFS);
block             535 fs/gfs2/lops.c 			if (bio && (off || block < blocks_submitted + max_blocks)) {
block             553 fs/gfs2/lops.c 				blocks_submitted = block;
block             581 fs/gfs2/lops.c 	while (blocks_read < block) {
block             369 fs/gfs2/quota.c 	unsigned int block, offset;
block             381 fs/gfs2/quota.c 	block = qd->qd_slot / sdp->sd_qc_per_block;
block             385 fs/gfs2/quota.c 	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
block             271 fs/gfs2/rgrp.c static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
block             273 fs/gfs2/rgrp.c 	if (!rgrp_contains_block(rbm->rgd, block))
block             276 fs/gfs2/rgrp.c 	rbm->offset = block - rbm->rgd->rd_data0;
block             366 fs/gfs2/rgrp.c 	u64 block;
block             390 fs/gfs2/rgrp.c 		block = gfs2_rbm_to_block(&rbm);
block             391 fs/gfs2/rgrp.c 		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
block            1616 fs/gfs2/rgrp.c static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
block            1628 fs/gfs2/rgrp.c 		rc = rs_cmp(block, length, rs);
block            1638 fs/gfs2/rgrp.c 		while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
block            1639 fs/gfs2/rgrp.c 			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
block            1648 fs/gfs2/rgrp.c 	return block;
block            1672 fs/gfs2/rgrp.c 	u64 block = gfs2_rbm_to_block(rbm);
block            1691 fs/gfs2/rgrp.c 	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
block            1692 fs/gfs2/rgrp.c 	if (nblock == block) {
block            1701 fs/gfs2/rgrp.c 		nblock = block + extlen;
block            1836 fs/gfs2/rgrp.c 	u64 block;
block            1854 fs/gfs2/rgrp.c 		block = gfs2_rbm_to_block(&rbm);
block            1855 fs/gfs2/rgrp.c 		if (gfs2_rbm_from_block(&rbm, block + 1))
block            1857 fs/gfs2/rgrp.c 		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
block            1859 fs/gfs2/rgrp.c 		if (block == skip)
block            1861 fs/gfs2/rgrp.c 		*last_unlinked = block;
block            1863 fs/gfs2/rgrp.c 		error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
block            2192 fs/gfs2/rgrp.c 	u64 block;
block            2196 fs/gfs2/rgrp.c 	block = gfs2_rbm_to_block(rbm);
block            2199 fs/gfs2/rgrp.c 	block++;
block            2201 fs/gfs2/rgrp.c 		ret = gfs2_rbm_from_block(&pos, block);
block            2207 fs/gfs2/rgrp.c 		block++;
block            2315 fs/gfs2/rgrp.c 	u64 block;
block            2321 fs/gfs2/rgrp.c 			block = gfs2_rbm_to_block(rbm);
block            2322 fs/gfs2/rgrp.c 			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
block            2389 fs/gfs2/rgrp.c 	u64 block; /* block, within the file system scope */
block            2410 fs/gfs2/rgrp.c 	block = gfs2_rbm_to_block(&rbm);
block            2411 fs/gfs2/rgrp.c 	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
block            2419 fs/gfs2/rgrp.c 		ip->i_goal = block + ndata - 1;
block            2448 fs/gfs2/rgrp.c 		gfs2_trans_remove_revoke(sdp, block, *nblocks);
block            2453 fs/gfs2/rgrp.c 	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
block            2455 fs/gfs2/rgrp.c 	*bn = block;
block            2598 fs/gfs2/rgrp.c 		    u64 block)
block            2615 fs/gfs2/rgrp.c 		if (rgrp_contains_block(rgd, block))
block            2617 fs/gfs2/rgrp.c 		rgd = gfs2_blk2rgrpd(sdp, block, 1);
block            2620 fs/gfs2/rgrp.c 		if (!rgd || !rgrp_contains_block(rgd, block))
block            2621 fs/gfs2/rgrp.c 			rgd = gfs2_blk2rgrpd(sdp, block, 1);
block            2626 fs/gfs2/rgrp.c 		       (unsigned long long)block);
block              68 fs/gfs2/rgrp.h 			   u64 block);
block              85 fs/gfs2/rgrp.h static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
block              89 fs/gfs2/rgrp.h 	return first <= block && block < last;
block             521 fs/gfs2/sys.c  GDLM_ATTR(block,		0644, block_show,		block_store);
block             333 fs/gfs2/trace_gfs2.h 		__field(	sector_t,	block		)
block             341 fs/gfs2/trace_gfs2.h 		__entry->block		= bd->bd_bh->b_blocknr;
block             348 fs/gfs2/trace_gfs2.h 		  (unsigned long long)__entry->block,
block             550 fs/gfs2/trace_gfs2.h 		 u64 block, unsigned len, u8 block_state),
block             552 fs/gfs2/trace_gfs2.h 	TP_ARGS(ip, rgd, block, len, block_state),
block             567 fs/gfs2/trace_gfs2.h 		__entry->start		= block;
block             625 fs/gfs2/xattr.c 	u64 block;
block             628 fs/gfs2/xattr.c 	error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
block             631 fs/gfs2/xattr.c 	gfs2_trans_remove_revoke(sdp, block, 1);
block             632 fs/gfs2/xattr.c 	*bhp = gfs2_meta_new(ip->i_gl, block);
block             686 fs/gfs2/xattr.c 			u64 block;
block             690 fs/gfs2/xattr.c 			error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
block             693 fs/gfs2/xattr.c 			gfs2_trans_remove_revoke(sdp, block, 1);
block             694 fs/gfs2/xattr.c 			bh = gfs2_meta_new(ip->i_gl, block);
block             241 fs/hfs/bnode.c 	int size, block, i, hash;
block             278 fs/hfs/bnode.c 	block = off >> PAGE_SHIFT;
block             281 fs/hfs/bnode.c 		page = read_mapping_page(mapping, block++, NULL);
block              21 fs/hfs/extent.c static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
block              26 fs/hfs/extent.c 	key->ext.FABN = cpu_to_be16(block);
block              82 fs/hfs/extent.c 			return be16_to_cpu(ext->block) + off;
block             107 fs/hfs/extent.c 	return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
block             151 fs/hfs/extent.c 					u32 cnid, u32 block, u8 type)
block             155 fs/hfs/extent.c 	hfs_ext_build_key(fd->search_key, cnid, block, type);
block             169 fs/hfs/extent.c static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
block             180 fs/hfs/extent.c 				    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
block             191 fs/hfs/extent.c static int hfs_ext_read_extent(struct inode *inode, u16 block)
block             196 fs/hfs/extent.c 	if (block >= HFS_I(inode)->cached_start &&
block             197 fs/hfs/extent.c 	    block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
block             202 fs/hfs/extent.c 		res = __hfs_ext_cache_extent(&fd, inode, block);
block             215 fs/hfs/extent.c 			     be16_to_cpu(extent[i].block),
block             230 fs/hfs/extent.c 			start = be16_to_cpu(extent->block);
block             235 fs/hfs/extent.c 				extent->block = cpu_to_be16(alloc_block);
block             267 fs/hfs/extent.c 		start = be16_to_cpu(extent->block);
block             270 fs/hfs/extent.c 			extent->block = 0;
block             336 fs/hfs/extent.c int hfs_get_block(struct inode *inode, sector_t block,
block             345 fs/hfs/extent.c 	ablock = (u32)block / HFS_SB(sb)->fs_div;
block             347 fs/hfs/extent.c 	if (block >= HFS_I(inode)->fs_blocks) {
block             350 fs/hfs/extent.c 		if (block > HFS_I(inode)->fs_blocks)
block             379 fs/hfs/extent.c 	       (u32)block % HFS_SB(sb)->fs_div);
block             419 fs/hfs/extent.c 			HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
block             465 fs/hfs/extent.c 	HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
block             176 fs/hfs/hfs.h   	__be16 block;
block              67 fs/hfs/inode.c static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
block              69 fs/hfs/inode.c 	return generic_block_bmap(mapping, block, hfs_get_block);
block              71 fs/hfs/mdb.c   	unsigned int block;
block             174 fs/hfs/mdb.c   	block = be16_to_cpu(mdb->drVBMSt) + part_start;
block             175 fs/hfs/mdb.c   	off = (loff_t)block << HFS_SECTOR_SIZE_BITS;
block             302 fs/hfs/mdb.c   		sector_t block;
block             306 fs/hfs/mdb.c   		block = be16_to_cpu(HFS_SB(sb)->mdb->drVBMSt) + HFS_SB(sb)->part_start;
block             307 fs/hfs/mdb.c   		off = (block << HFS_SECTOR_SIZE_BITS) & (sb->s_blocksize - 1);
block             308 fs/hfs/mdb.c   		block >>= sb->s_blocksize_bits - HFS_SECTOR_SIZE_BITS;
block             312 fs/hfs/mdb.c   			bh = sb_bread(sb, block);
block             325 fs/hfs/mdb.c   			block++;
block             405 fs/hfsplus/bnode.c 	int size, block, i, hash;
block             444 fs/hfsplus/bnode.c 	block = off >> PAGE_SHIFT;
block             446 fs/hfsplus/bnode.c 	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
block             447 fs/hfsplus/bnode.c 		page = read_mapping_page(mapping, block, NULL);
block              42 fs/hfsplus/extents.c 				  u32 block, u8 type)
block              46 fs/hfsplus/extents.c 	key->ext.start_block = cpu_to_be32(block);
block             158 fs/hfsplus/extents.c 					    u32 cnid, u32 block, u8 type)
block             162 fs/hfsplus/extents.c 	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
block             178 fs/hfsplus/extents.c 		struct inode *inode, u32 block)
block             192 fs/hfsplus/extents.c 					block, HFSPLUS_IS_RSRC(inode) ?
block             206 fs/hfsplus/extents.c static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
block             212 fs/hfsplus/extents.c 	if (block >= hip->cached_start &&
block             213 fs/hfsplus/extents.c 	    block < hip->cached_start + hip->cached_blocks)
block             218 fs/hfsplus/extents.c 		res = __hfsplus_ext_cache_extent(&fd, inode, block);
block              61 fs/hfsplus/inode.c static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
block              63 fs/hfsplus/inode.c 	return generic_block_bmap(mapping, block, hfsplus_get_block);
block             188 fs/hpfs/file.c static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
block             190 fs/hpfs/file.c 	return generic_block_bmap(mapping, block, hpfs_get_block);
block            1613 fs/inode.c     sector_t bmap(struct inode *inode, sector_t block)
block            1617 fs/inode.c     		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
block              58 fs/ioctl.c     	int res, block;
block              65 fs/ioctl.c     	res = get_user(block, p);
block              68 fs/ioctl.c     	res = mapping->a_ops->bmap(mapping, block);
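The fs/ioctl.c lines above are the kernel side of the FIBMAP ioctl: a logical block number is read from user space and the address_space bmap() method returns the matching on-disk block. A hedged user-space sketch of calling it; FIBMAP generally requires CAP_SYS_RAWIO, and the path is only an example:

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);   /* example path */
	int block = 0;                              /* logical block 0 */

	if (fd < 0)
		return 1;
	/* FIBMAP replaces the logical block number in place with the
	 * physical block number (0 is returned for a hole). */
	if (ioctl(fd, FIBMAP, &block) == 0)
		printf("logical block 0 -> physical block %d\n", block);
	else
		perror("FIBMAP");
	close(fd);
	return 0;
}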
block              88 fs/isofs/dir.c 	unsigned long block, offset, block_saved, offset_saved;
block              99 fs/isofs/dir.c 	block = ctx->pos >> bufbits;
block             105 fs/isofs/dir.c 			bh = isofs_bread(inode, block);
block             124 fs/isofs/dir.c 			block = ctx->pos >> bufbits;
block             129 fs/isofs/dir.c 		block_saved = block;
block             138 fs/isofs/dir.c 			block++;
block             142 fs/isofs/dir.c 				bh = isofs_bread(inode, block);
block             153 fs/isofs/dir.c 			       " in block %lu of inode %lu\n", block,
block              21 fs/isofs/export.c 		  unsigned long block,
block              27 fs/isofs/export.c 	if (block == 0)
block              29 fs/isofs/export.c 	inode = isofs_iget(sb, block, offset);
block             154 fs/isofs/export.c 	u32 block;
block             170 fs/isofs/export.c 	return isofs_export_iget(sb, ifid->block, ifid->offset,
block             594 fs/isofs/inode.c static bool rootdir_empty(struct super_block *sb, unsigned long block)
block             600 fs/isofs/inode.c 	bh = sb_bread(sb, block);
block             633 fs/isofs/inode.c 	int iso_blknum, block;
block             675 fs/isofs/inode.c 		block = iso_blknum << (ISOFS_BLOCK_BITS - s->s_blocksize_bits);
block             676 fs/isofs/inode.c 		if (!(bh = sb_bread(s, block)))
block            1009 fs/isofs/inode.c 		__func__, s->s_id, iso_blknum, block);
block            1162 fs/isofs/inode.c static int isofs_bmap(struct inode *inode, sector_t block)
block            1169 fs/isofs/inode.c 	error = isofs_get_block(inode, block, &dummy, 0);
block            1175 fs/isofs/inode.c struct buffer_head *isofs_bread(struct inode *inode, sector_t block)
block            1177 fs/isofs/inode.c 	sector_t blknr = isofs_bmap(inode, block);
block            1194 fs/isofs/inode.c static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
block            1196 fs/isofs/inode.c 	return generic_block_bmap(mapping,block,isofs_get_block);
block            1210 fs/isofs/inode.c 	unsigned long block, offset, block_saved, offset_saved;
block            1224 fs/isofs/inode.c 	block = ei->i_iget5_block;
block            1232 fs/isofs/inode.c 			bh = sb_bread(inode->i_sb, block);
block            1242 fs/isofs/inode.c 			++block;
block            1247 fs/isofs/inode.c 		block_saved = block;
block            1261 fs/isofs/inode.c 			block++;
block            1265 fs/isofs/inode.c 				bh = sb_bread(inode->i_sb, block);
block            1297 fs/isofs/inode.c 	printk(KERN_INFO "ISOFS: unable to read i-node block %lu\n", block);
block            1313 fs/isofs/inode.c 	unsigned long block;
block            1323 fs/isofs/inode.c 	block = ei->i_iget5_block;
block            1324 fs/isofs/inode.c 	bh = sb_bread(inode->i_sb, block);
block            1343 fs/isofs/inode.c 		bh = sb_bread(inode->i_sb, ++block);
block            1505 fs/isofs/inode.c 	unsigned long block;
block            1514 fs/isofs/inode.c 	return (i->i_iget5_block == d->block)
block            1523 fs/isofs/inode.c 	i->i_iget5_block = d->block;
block            1533 fs/isofs/inode.c 			   unsigned long block,
block            1545 fs/isofs/inode.c 	data.block = block;
block            1548 fs/isofs/inode.c 	hashval = (block << sb->s_blocksize_bits) | offset;
block             126 fs/isofs/isofs.h 			   unsigned long block,
block             131 fs/isofs/isofs.h 				       unsigned long block,
block             134 fs/isofs/isofs.h 	return __isofs_iget(sb, block, offset, 0);
block             138 fs/isofs/isofs.h 					     unsigned long block,
block             141 fs/isofs/isofs.h 	return __isofs_iget(sb, block, offset, 1);
block             148 fs/isofs/isofs.h static inline unsigned long isofs_get_ino(unsigned long block,
block             152 fs/isofs/isofs.h 	return (block << (bufbits - 5)) | (offset >> 5);
block             188 fs/isofs/isofs.h 				 unsigned long *block,
block             194 fs/isofs/isofs.h 		*block = (unsigned long)isonum_733(de->extent)
block              42 fs/isofs/namei.c 	unsigned long block, f_pos, offset, block_saved, offset_saved;
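The isofs_get_ino() lines above pack a directory record's block number and its offset (in 32-byte units) into a single inode number. A sketch of that packing, assuming 2048-byte buffers (bufbits = 11), the usual ISO 9660 sector size:

#include <stdio.h>

int main(void)
{
	unsigned long block = 16, offset = 96;   /* example record location */
	unsigned int bufbits = 11;               /* assume 2048-byte buffers */

	/* Same packing as isofs_get_ino(): block number in the high bits,
	 * the record offset in 32-byte units in the low bits. */
	unsigned long ino = (block << (bufbits - 5)) | (offset >> 5);

	printf("block %lu, offset %lu -> ino %lu\n", block, offset, ino);
	return 0;
}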
block              51 fs/isofs/namei.c 	block = 0;
block              59 fs/isofs/namei.c 			bh = isofs_bread(dir, block);
block              71 fs/isofs/namei.c 			block = f_pos >> bufbits;
block              86 fs/isofs/namei.c 			block++;
block              90 fs/isofs/namei.c 				bh = isofs_bread(dir, block);
block             103 fs/isofs/namei.c 			       " in block %lu of inode %lu\n", block,
block             155 fs/isofs/namei.c 	unsigned long uninitialized_var(block);
block             165 fs/isofs/namei.c 				&block, &offset,
block             170 fs/isofs/namei.c 	inode = found ? isofs_iget(dir->i_sb, block, offset) : NULL;
block             704 fs/isofs/rock.c 	unsigned long block, offset;
block             714 fs/isofs/rock.c 	block = ei->i_iget5_block;
block             715 fs/isofs/rock.c 	bh = sb_bread(inode->i_sb, block);
block             318 fs/jbd2/commit.c 				   unsigned long long block)
block             320 fs/jbd2/commit.c 	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
block             322 fs/jbd2/commit.c 		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
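The jbd2 commit.c lines above split a 64-bit block number into two 32-bit on-disk tag fields (low word plus high word), and the recovery.c lines further down rejoin them. A sketch of that split and rejoin, ignoring the big-endian conversion done by cpu_to_be32()/be32_to_cpu():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t block = 0x0000012345678 + 0x9ab;  /* replaced below */
	uint32_t lo, hi;
	uint64_t back;

	block = 0x0000012345678 * 0x1000 + 0x9ab;  /* 0x12345678 9ab spread over 48 bits */
	block = 0x00000123456789abULL;             /* example 48-bit block number */

	/* Split as in the tag-writing lines; the double shift stands in
	 * for ">> 32". */
	lo = (uint32_t)(block & (uint32_t)~0u);
	hi = (uint32_t)((block >> 31) >> 1);

	/* Rejoin as in the recovery-side lines. */
	back = ((uint64_t)hi << 32) | lo;

	printf("block 0x%llx -> lo 0x%x hi 0x%x -> 0x%llx\n",
	       (unsigned long long)block, lo, hi, (unsigned long long)back);
	return 0;
}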
block             881 fs/jbd2/journal.c 			      unsigned long *block)
block             891 fs/jbd2/journal.c 		*block = transaction->t_log_start;
block             894 fs/jbd2/journal.c 		*block = transaction->t_log_start;
block             897 fs/jbd2/journal.c 		*block = journal->j_head;
block             900 fs/jbd2/journal.c 		*block = journal->j_head;
block             919 fs/jbd2/journal.c int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
block             932 fs/jbd2/journal.c 	ret = jbd2_journal_update_sb_log_tail(journal, tid, block,
block             938 fs/jbd2/journal.c 	freed = block - journal->j_tail;
block             939 fs/jbd2/journal.c 	if (block < journal->j_tail)
block             942 fs/jbd2/journal.c 	trace_jbd2_update_log_tail(journal, tid, block, freed);
block             946 fs/jbd2/journal.c 		  journal->j_tail_sequence, tid, block, freed);
block             950 fs/jbd2/journal.c 	journal->j_tail = block;
block             962 fs/jbd2/journal.c void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
block             966 fs/jbd2/journal.c 		__jbd2_update_log_tail(journal, tid, block);
block             340 fs/jbd2/recovery.c 	unsigned long long block = be32_to_cpu(tag->t_blocknr);
block             342 fs/jbd2/recovery.c 		block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
block             343 fs/jbd2/recovery.c 	return block;
block             131 fs/jbd2/revoke.c static inline int hash(journal_t *journal, unsigned long long block)
block             133 fs/jbd2/revoke.c 	return hash_64(block, journal->j_revoke->hash_shift);
block             291 fs/jffs2/erase.c 	struct jffs2_raw_node_ref *block, *ref;
block             295 fs/jffs2/erase.c 	block = ref = jeb->first_node;
block             300 fs/jffs2/erase.c 			jffs2_free_refblock(block);
block             301 fs/jffs2/erase.c 			block = ref;
block             329 fs/jfs/inode.c static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
block             331 fs/jfs/inode.c 	return generic_block_bmap(mapping, block, jfs_get_block);
block             389 fs/lockd/clnt4xdr.c 	encode_bool(xdr, args->block);
block             412 fs/lockd/clnt4xdr.c 	encode_bool(xdr, args->block);
block             102 fs/lockd/clntlock.c 	struct nlm_wait *block;
block             104 fs/lockd/clntlock.c 	block = kmalloc(sizeof(*block), GFP_KERNEL);
block             105 fs/lockd/clntlock.c 	if (block != NULL) {
block             106 fs/lockd/clntlock.c 		block->b_host = host;
block             107 fs/lockd/clntlock.c 		block->b_lock = fl;
block             108 fs/lockd/clntlock.c 		init_waitqueue_head(&block->b_wait);
block             109 fs/lockd/clntlock.c 		block->b_status = nlm_lck_blocked;
block             112 fs/lockd/clntlock.c 		list_add(&block->b_list, &nlm_blocked);
block             115 fs/lockd/clntlock.c 	return block;
block             118 fs/lockd/clntlock.c void nlmclnt_finish_block(struct nlm_wait *block)
block             120 fs/lockd/clntlock.c 	if (block == NULL)
block             123 fs/lockd/clntlock.c 	list_del(&block->b_list);
block             125 fs/lockd/clntlock.c 	kfree(block);
block             131 fs/lockd/clntlock.c int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
block             138 fs/lockd/clntlock.c 	if (block == NULL)
block             149 fs/lockd/clntlock.c 	ret = wait_event_interruptible_timeout(block->b_wait,
block             150 fs/lockd/clntlock.c 			block->b_status != nlm_lck_blocked,
block             155 fs/lockd/clntlock.c 	if (block->b_status == nlm_lck_denied_grace_period)
block             156 fs/lockd/clntlock.c 		block->b_status = nlm_lck_blocked;
block             157 fs/lockd/clntlock.c 	req->a_res.status = block->b_status;
block             168 fs/lockd/clntlock.c 	struct nlm_wait	*block;
block             176 fs/lockd/clntlock.c 	list_for_each_entry(block, &nlm_blocked, b_list) {
block             177 fs/lockd/clntlock.c 		struct file_lock *fl_blocked = block->b_lock;
block             189 fs/lockd/clntlock.c 		if (!rpc_cmp_addr(nlm_addr(block->b_host), addr))
block             196 fs/lockd/clntlock.c 		block->b_status = nlm_granted;
block             197 fs/lockd/clntlock.c 		wake_up(&block->b_wait);
block             232 fs/lockd/clntlock.c 	struct nlm_wait	  *block;
block             286 fs/lockd/clntlock.c 	list_for_each_entry(block, &nlm_blocked, b_list) {
block             287 fs/lockd/clntlock.c 		if (block->b_host == host) {
block             288 fs/lockd/clntlock.c 			block->b_status = nlm_lck_denied_grace_period;
block             289 fs/lockd/clntlock.c 			wake_up(&block->b_wait);
block             183 fs/lockd/clntproc.c 			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
block             518 fs/lockd/clntproc.c 	struct nlm_wait *block = NULL;
block             533 fs/lockd/clntproc.c 	block = nlmclnt_prepare_block(host, fl);
block             552 fs/lockd/clntproc.c 		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
block             563 fs/lockd/clntproc.c 		if (!req->a_args.block)
block             565 fs/lockd/clntproc.c 		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
block             596 fs/lockd/clntproc.c 	nlmclnt_finish_block(block);
block             604 fs/lockd/clntproc.c 	nlmclnt_finish_block(block);
block             758 fs/lockd/clntproc.c static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
block             772 fs/lockd/clntproc.c 	req->a_args.block = block;
block             382 fs/lockd/clntxdr.c 	encode_bool(xdr, args->block);
block             405 fs/lockd/clntxdr.c 	encode_bool(xdr, args->block);
block             145 fs/lockd/svc4proc.c 					argp->block, &argp->cookie,
block              43 fs/lockd/svclock.c static void nlmsvc_release_block(struct nlm_block *block);
block              44 fs/lockd/svclock.c static void	nlmsvc_insert_block(struct nlm_block *block, unsigned long);
block              45 fs/lockd/svclock.c static void	nlmsvc_remove_block(struct nlm_block *block);
block              90 fs/lockd/svclock.c nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
block              95 fs/lockd/svclock.c 	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
block              96 fs/lockd/svclock.c 	if (list_empty(&block->b_list)) {
block              97 fs/lockd/svclock.c 		kref_get(&block->b_count);
block              99 fs/lockd/svclock.c 		list_del_init(&block->b_list);
block             116 fs/lockd/svclock.c 	list_add_tail(&block->b_list, pos);
block             117 fs/lockd/svclock.c 	block->b_when = when;
block             120 fs/lockd/svclock.c static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
block             123 fs/lockd/svclock.c 	nlmsvc_insert_block_locked(block, when);
block             131 fs/lockd/svclock.c nlmsvc_remove_block(struct nlm_block *block)
block             133 fs/lockd/svclock.c 	if (!list_empty(&block->b_list)) {
block             135 fs/lockd/svclock.c 		list_del_init(&block->b_list);
block             137 fs/lockd/svclock.c 		nlmsvc_release_block(block);
block             147 fs/lockd/svclock.c 	struct nlm_block	*block;
block             154 fs/lockd/svclock.c 	list_for_each_entry(block, &nlm_blocked, b_list) {
block             155 fs/lockd/svclock.c 		fl = &block->b_call->a_args.lock.fl;
block             157 fs/lockd/svclock.c 				block->b_file, fl->fl_pid,
block             160 fs/lockd/svclock.c 				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
block             161 fs/lockd/svclock.c 		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
block             162 fs/lockd/svclock.c 			kref_get(&block->b_count);
block             163 fs/lockd/svclock.c 			return block;
block             185 fs/lockd/svclock.c 	struct nlm_block *block;
block             187 fs/lockd/svclock.c 	list_for_each_entry(block, &nlm_blocked, b_list) {
block             188 fs/lockd/svclock.c 		if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
block             195 fs/lockd/svclock.c 	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
block             196 fs/lockd/svclock.c 	kref_get(&block->b_count);
block             197 fs/lockd/svclock.c 	return block;
block             220 fs/lockd/svclock.c 	struct nlm_block	*block;
block             228 fs/lockd/svclock.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block             229 fs/lockd/svclock.c 	if (block == NULL)
block             231 fs/lockd/svclock.c 	kref_init(&block->b_count);
block             232 fs/lockd/svclock.c 	INIT_LIST_HEAD(&block->b_list);
block             233 fs/lockd/svclock.c 	INIT_LIST_HEAD(&block->b_flist);
block             243 fs/lockd/svclock.c 	dprintk("lockd: created block %p...\n", block);
block             246 fs/lockd/svclock.c 	block->b_daemon = rqstp->rq_server;
block             247 fs/lockd/svclock.c 	block->b_host   = host;
block             248 fs/lockd/svclock.c 	block->b_file   = file;
block             252 fs/lockd/svclock.c 	list_add(&block->b_flist, &file->f_blocks);
block             255 fs/lockd/svclock.c 	block->b_call = call;
block             257 fs/lockd/svclock.c 	call->a_block = block;
block             259 fs/lockd/svclock.c 	return block;
block             262 fs/lockd/svclock.c 	kfree(block);
block             273 fs/lockd/svclock.c static int nlmsvc_unlink_block(struct nlm_block *block)
block             276 fs/lockd/svclock.c 	dprintk("lockd: unlinking block %p...\n", block);
block             279 fs/lockd/svclock.c 	status = locks_delete_block(&block->b_call->a_args.lock.fl);
block             280 fs/lockd/svclock.c 	nlmsvc_remove_block(block);
block             286 fs/lockd/svclock.c 	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
block             287 fs/lockd/svclock.c 	struct nlm_file		*file = block->b_file;
block             289 fs/lockd/svclock.c 	dprintk("lockd: freeing block %p...\n", block);
block             292 fs/lockd/svclock.c 	list_del_init(&block->b_flist);
block             295 fs/lockd/svclock.c 	nlmsvc_freegrantargs(block->b_call);
block             296 fs/lockd/svclock.c 	nlmsvc_release_call(block->b_call);
block             297 fs/lockd/svclock.c 	nlm_release_file(block->b_file);
block             298 fs/lockd/svclock.c 	kfree(block);
block             301 fs/lockd/svclock.c static void nlmsvc_release_block(struct nlm_block *block)
block             303 fs/lockd/svclock.c 	if (block != NULL)
block             304 fs/lockd/svclock.c 		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
block             315 fs/lockd/svclock.c 	struct nlm_block *block, *next;
block             319 fs/lockd/svclock.c 	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
block             320 fs/lockd/svclock.c 		if (!match(block->b_host, host))
block             324 fs/lockd/svclock.c 		if (list_empty(&block->b_list))
block             326 fs/lockd/svclock.c 		kref_get(&block->b_count);
block             328 fs/lockd/svclock.c 		nlmsvc_unlink_block(block);
block             329 fs/lockd/svclock.c 		nlmsvc_release_block(block);
block             460 fs/lockd/svclock.c nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
block             464 fs/lockd/svclock.c 	block->b_flags |= B_QUEUED;
block             466 fs/lockd/svclock.c 	nlmsvc_insert_block(block, NLM_TIMEOUT);
block             468 fs/lockd/svclock.c 	block->b_cache_req = &rqstp->rq_chandle;
block             470 fs/lockd/svclock.c 		block->b_deferred_req =
block             471 fs/lockd/svclock.c 			rqstp->rq_chandle.defer(block->b_cache_req);
block             472 fs/lockd/svclock.c 		if (block->b_deferred_req != NULL)
block             476 fs/lockd/svclock.c 		block, block->b_flags, ntohl(status));
block             490 fs/lockd/svclock.c 	struct nlm_block	*block = NULL;
block             507 fs/lockd/svclock.c 	block = nlmsvc_lookup_block(file, lock);
block             508 fs/lockd/svclock.c 	if (block == NULL) {
block             509 fs/lockd/svclock.c 		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
block             511 fs/lockd/svclock.c 		if (block == NULL)
block             513 fs/lockd/svclock.c 		lock = &block->b_call->a_args.lock;
block             517 fs/lockd/svclock.c 	if (block->b_flags & B_QUEUED) {
block             519 fs/lockd/svclock.c 							block, block->b_flags);
block             520 fs/lockd/svclock.c 		if (block->b_granted) {
block             521 fs/lockd/svclock.c 			nlmsvc_unlink_block(block);
block             525 fs/lockd/svclock.c 		if (block->b_flags & B_TIMED_OUT) {
block             526 fs/lockd/svclock.c 			nlmsvc_unlink_block(block);
block             568 fs/lockd/svclock.c 			ret = nlmsvc_defer_lock_rqst(rqstp, block);
block             581 fs/lockd/svclock.c 	nlmsvc_insert_block(block, NLM_NEVER);
block             584 fs/lockd/svclock.c 	nlmsvc_release_block(block);
block             690 fs/lockd/svclock.c 	struct nlm_block	*block;
block             704 fs/lockd/svclock.c 	block = nlmsvc_lookup_block(file, lock);
block             706 fs/lockd/svclock.c 	if (block != NULL) {
block             707 fs/lockd/svclock.c 		vfs_cancel_lock(block->b_file->f_file,
block             708 fs/lockd/svclock.c 				&block->b_call->a_args.lock.fl);
block             709 fs/lockd/svclock.c 		status = nlmsvc_unlink_block(block);
block             710 fs/lockd/svclock.c 		nlmsvc_release_block(block);
block             725 fs/lockd/svclock.c nlmsvc_update_deferred_block(struct nlm_block *block, int result)
block             727 fs/lockd/svclock.c 	block->b_flags |= B_GOT_CALLBACK;
block             729 fs/lockd/svclock.c 		block->b_granted = 1;
block             731 fs/lockd/svclock.c 		block->b_flags |= B_TIMED_OUT;
block             736 fs/lockd/svclock.c 	struct nlm_block *block;
block             740 fs/lockd/svclock.c 	list_for_each_entry(block, &nlm_blocked, b_list) {
block             741 fs/lockd/svclock.c 		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
block             743 fs/lockd/svclock.c 							block, block->b_flags);
block             744 fs/lockd/svclock.c 			if (block->b_flags & B_QUEUED) {
block             745 fs/lockd/svclock.c 				if (block->b_flags & B_TIMED_OUT) {
block             749 fs/lockd/svclock.c 				nlmsvc_update_deferred_block(block, result);
block             751 fs/lockd/svclock.c 				block->b_granted = 1;
block             753 fs/lockd/svclock.c 			nlmsvc_insert_block_locked(block, 0);
block             754 fs/lockd/svclock.c 			svc_wake_up(block->b_daemon);
block             775 fs/lockd/svclock.c 	struct nlm_block	*block;
block             779 fs/lockd/svclock.c 	list_for_each_entry(block, &nlm_blocked, b_list) {
block             780 fs/lockd/svclock.c 		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
block             781 fs/lockd/svclock.c 			nlmsvc_insert_block_locked(block, 0);
block             783 fs/lockd/svclock.c 			svc_wake_up(block->b_daemon);
block             808 fs/lockd/svclock.c nlmsvc_grant_blocked(struct nlm_block *block)
block             810 fs/lockd/svclock.c 	struct nlm_file		*file = block->b_file;
block             811 fs/lockd/svclock.c 	struct nlm_lock		*lock = &block->b_call->a_args.lock;
block             815 fs/lockd/svclock.c 	dprintk("lockd: grant blocked lock %p\n", block);
block             817 fs/lockd/svclock.c 	kref_get(&block->b_count);
block             820 fs/lockd/svclock.c 	nlmsvc_unlink_block(block);
block             825 fs/lockd/svclock.c 	if (block->b_granted) {
block             826 fs/lockd/svclock.c 		nlm_rebind_host(block->b_host);
block             847 fs/lockd/svclock.c 		nlmsvc_insert_block(block, NLM_NEVER);
block             848 fs/lockd/svclock.c 		nlmsvc_release_block(block);
block             853 fs/lockd/svclock.c 		nlmsvc_insert_block(block, 10 * HZ);
block             854 fs/lockd/svclock.c 		nlmsvc_release_block(block);
block             861 fs/lockd/svclock.c 	block->b_granted = 1;
block             866 fs/lockd/svclock.c 	nlmsvc_insert_block(block, NLM_NEVER);
block             871 fs/lockd/svclock.c 	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
block             876 fs/lockd/svclock.c 		nlmsvc_insert_block(block, 10 * HZ);
block             890 fs/lockd/svclock.c 	struct nlm_block	*block = call->a_block;
block             904 fs/lockd/svclock.c 	if (list_empty(&block->b_list))
block             917 fs/lockd/svclock.c 	nlmsvc_insert_block_locked(block, timeout);
block             918 fs/lockd/svclock.c 	svc_wake_up(block->b_daemon);
block             945 fs/lockd/svclock.c 	struct nlm_block	*block;
block             949 fs/lockd/svclock.c 	if (!(block = nlmsvc_find_block(cookie)))
block             954 fs/lockd/svclock.c 		nlmsvc_insert_block(block, 10 * HZ);
block             960 fs/lockd/svclock.c 		nlmsvc_unlink_block(block);
block             962 fs/lockd/svclock.c 	nlmsvc_release_block(block);
block             970 fs/lockd/svclock.c retry_deferred_block(struct nlm_block *block)
block             972 fs/lockd/svclock.c 	if (!(block->b_flags & B_GOT_CALLBACK))
block             973 fs/lockd/svclock.c 		block->b_flags |= B_TIMED_OUT;
block             974 fs/lockd/svclock.c 	nlmsvc_insert_block(block, NLM_TIMEOUT);
block             975 fs/lockd/svclock.c 	dprintk("revisit block %p flags %d\n",	block, block->b_flags);
block             976 fs/lockd/svclock.c 	if (block->b_deferred_req) {
block             977 fs/lockd/svclock.c 		block->b_deferred_req->revisit(block->b_deferred_req, 0);
block             978 fs/lockd/svclock.c 		block->b_deferred_req = NULL;
block             991 fs/lockd/svclock.c 	struct nlm_block *block;
block             995 fs/lockd/svclock.c 		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
block             997 fs/lockd/svclock.c 		if (block->b_when == NLM_NEVER)
block             999 fs/lockd/svclock.c 		if (time_after(block->b_when, jiffies)) {
block            1000 fs/lockd/svclock.c 			timeout = block->b_when - jiffies;
block            1006 fs/lockd/svclock.c 			block, block->b_when);
block            1007 fs/lockd/svclock.c 		if (block->b_flags & B_QUEUED) {
block            1009 fs/lockd/svclock.c 				block, block->b_granted, block->b_flags);
block            1010 fs/lockd/svclock.c 			retry_deferred_block(block);
block            1012 fs/lockd/svclock.c 			nlmsvc_grant_blocked(block);
block             176 fs/lockd/svcproc.c 					       argp->block, &argp->cookie,
block             219 fs/lockd/xdr.c 	argp->block  = ntohl(*p++);
block             240 fs/lockd/xdr.c 	argp->block = ntohl(*p++);
block             216 fs/lockd/xdr4.c 	argp->block  = ntohl(*p++);
block             237 fs/lockd/xdr4.c 	argp->block = ntohl(*p++);
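The fs/lockd/svclock.c entries above revolve around one structure, struct nlm_block, whose b_list is kept on the nlm_blocked list ordered by the b_when deadline and periodically rescanned by the retry loop that computes the next timeout. The following is only a minimal user-space analogue of that deadline-ordered pending list, with simplified names and an absolute deadline instead of the kernel's jiffies handling; it is a sketch, not the kernel code itself.

/* Minimal user-space analogue of lockd's deadline-ordered block list.
 * The struct, WHEN_NEVER and the absolute "when" value are simplified
 * assumptions, not the kernel structures indexed above. */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define WHEN_NEVER ULONG_MAX		/* stands in for NLM_NEVER */

struct pending_block {
	unsigned long when;		/* absolute deadline, like b_when */
	struct pending_block *next;
};

/* Keep the list sorted by deadline, entries with WHEN_NEVER at the tail,
 * mirroring how nlmsvc_insert_block_locked() positions b_list by b_when. */
static void insert_block(struct pending_block **head, struct pending_block *nb)
{
	while (*head && (*head)->when <= nb->when)
		head = &(*head)->next;
	nb->next = *head;
	*head = nb;
}

/* Retry every expired block and report how long until the next deadline,
 * roughly the shape of the retry loop indexed above. */
static unsigned long retry_expired(struct pending_block **head, unsigned long now)
{
	while (*head) {
		struct pending_block *b = *head;

		if (b->when == WHEN_NEVER || b->when > now)
			return b->when == WHEN_NEVER ? WHEN_NEVER : b->when - now;
		printf("retrying block with deadline %lu\n", b->when);
		*head = b->next;
		free(b);
	}
	return WHEN_NEVER;
}

int main(void)
{
	struct pending_block *head = NULL;
	unsigned long deadlines[] = { 30, WHEN_NEVER, 10 };

	for (size_t i = 0; i < 3; i++) {
		struct pending_block *b = malloc(sizeof(*b));

		if (!b)
			return 1;
		b->when = deadlines[i];
		insert_block(&head, b);
	}
	printf("next deadline in %lu\n", retry_expired(&head, 20));
	return 0;
}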
block              42 fs/minix/bitmap.c void minix_free_block(struct inode *inode, unsigned long block)
block              50 fs/minix/bitmap.c 	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
block              54 fs/minix/bitmap.c 	zone = block - sbi->s_firstdatazone + 1;
block              65 fs/minix/bitmap.c 		       sb->s_id, block);
block             109 fs/minix/bitmap.c 	int block;
block             119 fs/minix/bitmap.c 	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
block             121 fs/minix/bitmap.c 	*bh = sb_bread(sb, block);
block             133 fs/minix/bitmap.c 	int block;
block             145 fs/minix/bitmap.c 	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
block             147 fs/minix/bitmap.c 	*bh = sb_bread(sb, block);
block             159 fs/minix/inode.c 	unsigned long i, block;
block             243 fs/minix/inode.c 	block=2;
block             245 fs/minix/inode.c 		if (!(sbi->s_imap[i]=sb_bread(s, block)))
block             247 fs/minix/inode.c 		block++;
block             250 fs/minix/inode.c 		if (!(sbi->s_zmap[i]=sb_bread(s, block)))
block             252 fs/minix/inode.c 		block++;
block             262 fs/minix/inode.c 	block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
block             263 fs/minix/inode.c 	if (sbi->s_imap_blocks < block) {
block             269 fs/minix/inode.c 	block = minix_blocks_needed(
block             272 fs/minix/inode.c 	if (sbi->s_zmap_blocks < block) {
block             372 fs/minix/inode.c static int minix_get_block(struct inode *inode, sector_t block,
block             376 fs/minix/inode.c 		return V1_minix_get_block(inode, block, bh_result, create);
block             378 fs/minix/inode.c 		return V2_minix_get_block(inode, block, bh_result, create);
block             420 fs/minix/inode.c static sector_t minix_bmap(struct address_space *mapping, sector_t block)
block             422 fs/minix/inode.c 	return generic_block_bmap(mapping,block,minix_get_block);
block             146 fs/minix/itree_common.c static int get_block(struct inode * inode, sector_t block,
block             154 fs/minix/itree_common.c 	int depth = block_to_path(inode, block, offsets);
block              25 fs/minix/itree_v1.c static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
block              29 fs/minix/itree_v1.c 	if (block < 0) {
block              31 fs/minix/itree_v1.c 			block, inode->i_sb->s_bdev);
block              32 fs/minix/itree_v1.c 	} else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
block              36 fs/minix/itree_v1.c 				block, inode->i_sb->s_bdev);
block              37 fs/minix/itree_v1.c 	} else if (block < 7) {
block              38 fs/minix/itree_v1.c 		offsets[n++] = block;
block              39 fs/minix/itree_v1.c 	} else if ((block -= 7) < 512) {
block              41 fs/minix/itree_v1.c 		offsets[n++] = block;
block              43 fs/minix/itree_v1.c 		block -= 512;
block              45 fs/minix/itree_v1.c 		offsets[n++] = block>>9;
block              46 fs/minix/itree_v1.c 		offsets[n++] = block & 511;
block              53 fs/minix/itree_v1.c int V1_minix_get_block(struct inode * inode, long block,
block              56 fs/minix/itree_v1.c 	return get_block(inode, block, bh_result, create);
block              27 fs/minix/itree_v2.c static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
block              32 fs/minix/itree_v2.c 	if (block < 0) {
block              34 fs/minix/itree_v2.c 			block, sb->s_bdev);
block              35 fs/minix/itree_v2.c 	} else if ((u64)block * (u64)sb->s_blocksize >=
block              40 fs/minix/itree_v2.c 				block, sb->s_bdev);
block              41 fs/minix/itree_v2.c 	} else if (block < DIRCOUNT) {
block              42 fs/minix/itree_v2.c 		offsets[n++] = block;
block              43 fs/minix/itree_v2.c 	} else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
block              45 fs/minix/itree_v2.c 		offsets[n++] = block;
block              46 fs/minix/itree_v2.c 	} else if ((block -= INDIRCOUNT(sb)) < INDIRCOUNT(sb) * INDIRCOUNT(sb)) {
block              48 fs/minix/itree_v2.c 		offsets[n++] = block / INDIRCOUNT(sb);
block              49 fs/minix/itree_v2.c 		offsets[n++] = block % INDIRCOUNT(sb);
block              51 fs/minix/itree_v2.c 		block -= INDIRCOUNT(sb) * INDIRCOUNT(sb);
block              53 fs/minix/itree_v2.c 		offsets[n++] = (block / INDIRCOUNT(sb)) / INDIRCOUNT(sb);
block              54 fs/minix/itree_v2.c 		offsets[n++] = (block / INDIRCOUNT(sb)) % INDIRCOUNT(sb);
block              55 fs/minix/itree_v2.c 		offsets[n++] = block % INDIRCOUNT(sb);
block              62 fs/minix/itree_v2.c int V2_minix_get_block(struct inode * inode, long block,
block              65 fs/minix/itree_v2.c 	return get_block(inode, block, bh_result, create);
block              53 fs/minix/minix.h extern void minix_free_block(struct inode *inode, unsigned long block);
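The fs/minix/itree_v1.c entries above spell out the classic V1 block-to-path mapping: seven direct slots, one 512-entry indirect block, then a double-indirect block addressed with block>>9 and block & 511. A stand-alone sketch of that mapping follows; the fixed slot indices 7 and 8 are filled in for illustration only, since the index naturally omits the lines that do not mention "block".

/* Stand-alone sketch of the Minix V1 block-to-path mapping shown above.
 * Error handling is reduced to returning depth 0. */
#include <stdio.h>

static int v1_block_to_path(long block, int offsets[3])
{
	int n = 0;

	if (block < 0)
		return 0;			/* caller reports the error */
	if (block < 7) {
		offsets[n++] = block;		/* direct pointer */
	} else if ((block -= 7) < 512) {
		offsets[n++] = 7;		/* the single-indirect slot (illustrative) */
		offsets[n++] = block;
	} else {
		block -= 512;
		offsets[n++] = 8;		/* the double-indirect slot (illustrative) */
		offsets[n++] = block >> 9;
		offsets[n++] = block & 511;
	}
	return n;				/* depth of the lookup path */
}

int main(void)
{
	int o[3] = { 0, 0, 0 };
	int d = v1_block_to_path(700, o);

	/* block 700 falls in the double-indirect range: depth 3, path 8/0/181 */
	printf("depth %d: %d/%d/%d\n", d, o[0], o[1], o[2]);
	return 0;
}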
block             108 fs/mpage.c     	int block = 0;
block             125 fs/mpage.c     		if (block == page_block) {
block             132 fs/mpage.c     		block++;
block            6905 fs/nfs/nfs4proc.c 		data->arg.block = 1;
block            1297 fs/nfs/nfs4xdr.c static inline int nfs4_lock_type(struct file_lock *fl, int block)
block            1300 fs/nfs/nfs4xdr.c 		return block ? NFS4_READW_LT : NFS4_READ_LT;
block            1301 fs/nfs/nfs4xdr.c 	return block ? NFS4_WRITEW_LT : NFS4_WRITE_LT;
block            1333 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block));
block              30 fs/nilfs2/mdt.c nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
block              44 fs/nilfs2/mdt.c 	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
block              61 fs/nilfs2/mdt.c 	trace_nilfs2_mdt_insert_new_block(inode, inode->i_ino, block);
block              66 fs/nilfs2/mdt.c static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
block              80 fs/nilfs2/mdt.c 	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
block              93 fs/nilfs2/mdt.c 	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
block             167 fs/nilfs2/mdt.c static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
block             175 fs/nilfs2/mdt.c 	err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, 0, &first_bh);
block             183 fs/nilfs2/mdt.c 		blkoff = block + 1;
block             204 fs/nilfs2/mdt.c 			  inode->i_ino, block);
block             328 fs/nilfs2/mdt.c int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
block             333 fs/nilfs2/mdt.c 	err = nilfs_bmap_delete(ii->i_bmap, block);
block             336 fs/nilfs2/mdt.c 		nilfs_mdt_forget_block(inode, block);
block             356 fs/nilfs2/mdt.c int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
block             358 fs/nilfs2/mdt.c 	pgoff_t index = (pgoff_t)block >>
block             376 fs/nilfs2/mdt.c 		bh = nilfs_page_get_nth_block(page, block - first_block);
block              29 fs/nilfs2/page.c __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
block              40 fs/nilfs2/page.c 	bh = nilfs_page_get_nth_block(page, block - first_block);
block             539 fs/ntfs/aops.c 	sector_t block, dblock, iblock;
block             585 fs/ntfs/aops.c 	block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
block             618 fs/ntfs/aops.c 		if (unlikely(block >= dblock)) {
block             640 fs/ntfs/aops.c 		if (unlikely((block >= iblock) &&
block             648 fs/ntfs/aops.c 			if (block > iblock) {
block             702 fs/ntfs/aops.c 		vcn = (VCN)block << blocksize_bits;
block             795 fs/ntfs/aops.c 	} while (block++, (bh = bh->b_this_page) != head);
block             905 fs/ntfs/aops.c 	sector_t block, dblock, rec_block;
block             955 fs/ntfs/aops.c 	rec_block = block = (sector_t)page->index <<
block             968 fs/ntfs/aops.c 		if (likely(block < rec_block)) {
block             969 fs/ntfs/aops.c 			if (unlikely(block >= dblock)) {
block             987 fs/ntfs/aops.c 			BUG_ON(block > rec_block);
block             991 fs/ntfs/aops.c 			if (unlikely(block >= dblock)) {
block            1011 fs/ntfs/aops.c 			vcn = (VCN)block << bh_size_bits;
block            1068 fs/ntfs/aops.c 						(long long)block <<
block            1098 fs/ntfs/aops.c 	} while (block++, (bh = bh->b_this_page) != head);
block            1549 fs/ntfs/aops.c static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
block            1561 fs/ntfs/aops.c 			ni->mft_no, (unsigned long long)block);
block            1575 fs/ntfs/aops.c 	ofs = (s64)block << blocksize_bits;
block            1632 fs/ntfs/aops.c 	if (unlikely(sizeof(block) < sizeof(lcn))) {
block            1633 fs/ntfs/aops.c 		block = lcn = ((lcn << cluster_size_shift) + delta) >>
block            1636 fs/ntfs/aops.c 		if (unlikely(block != lcn)) {
block            1643 fs/ntfs/aops.c 		block = ((lcn << cluster_size_shift) + delta) >>
block            1646 fs/ntfs/aops.c 	return block;
block             706 fs/ntfs/attrib.c 	unsigned long block, max_block;
block             740 fs/ntfs/attrib.c 		block = lcn << vol->cluster_size_bits >> block_size_bits;
block             742 fs/ntfs/attrib.c 		max_block = block + (rl->length << vol->cluster_size_bits >>
block             746 fs/ntfs/attrib.c 			ntfs_debug("Reading block = 0x%lx.", block);
block             747 fs/ntfs/attrib.c 			bh = sb_bread(sb, block);
block             758 fs/ntfs/attrib.c 		} while (++block < max_block);
block             501 fs/ntfs/compress.c 	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
block             634 fs/ntfs/compress.c 		block = lcn << vol->cluster_size_bits >> block_size_bits;
block             636 fs/ntfs/compress.c 		max_block = block + (vol->cluster_size >> block_size_bits);
block             638 fs/ntfs/compress.c 			ntfs_debug("block = 0x%x.", block);
block             639 fs/ntfs/compress.c 			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
block             642 fs/ntfs/compress.c 		} while (++block < max_block);
block            1743 fs/ntfs/inode.c 	s64 block;
block            1794 fs/ntfs/inode.c 	block = vol->mft_lcn << vol->cluster_size_bits >>
block            1802 fs/ntfs/inode.c 		bh = sb_bread(sb, block++);
block             763 fs/ntfs/logfile.c 		sector_t block, end_block;
block             781 fs/ntfs/logfile.c 		block = lcn << vol->cluster_size_bits >> block_size_bits;
block             792 fs/ntfs/logfile.c 			bh = sb_getblk(sb, block);
block             819 fs/ntfs/logfile.c 		} while (++block < end_block);
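Several of the fs/ntfs entries above repeat the same conversion: a logical cluster number becomes a device block via lcn << cluster_size_bits >> block_size_bits, and a run length in clusters becomes a block count the same way. A small stand-alone sketch of that arithmetic follows; the struct and field names are illustrative, not the ntfs_volume layout.

/* Sketch of the LCN-to-device-block arithmetic repeated in the ntfs
 * lines above.  Both sizes are powers of two, so the conversion is a
 * pair of shifts. */
#include <stdio.h>

struct vol_geom {
	unsigned cluster_size_bits;	/* e.g. 12 for 4 KiB clusters */
	unsigned block_size_bits;	/* e.g. 9 for 512-byte blocks */
};

static unsigned long lcn_to_first_block(const struct vol_geom *v,
					unsigned long long lcn)
{
	return lcn << v->cluster_size_bits >> v->block_size_bits;
}

static unsigned long run_block_count(const struct vol_geom *v,
				     unsigned long long run_len_clusters)
{
	return run_len_clusters << v->cluster_size_bits >> v->block_size_bits;
}

int main(void)
{
	struct vol_geom v = { .cluster_size_bits = 12, .block_size_bits = 9 };
	unsigned long first = lcn_to_first_block(&v, 100);

	/* cluster 100 of a 4 KiB-cluster volume starts at 512-byte block 800
	 * and a one-cluster run covers 8 such blocks */
	printf("blocks %lu..%lu\n", first, first + run_block_count(&v, 1) - 1);
	return 0;
}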
block            4786 fs/ocfs2/alloc.c 	u64 block;
block            4841 fs/ocfs2/alloc.c 	block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
block            4845 fs/ocfs2/alloc.c 	status = ocfs2_insert_extent(handle, et, *logical_offset, block,
block            7055 fs/ocfs2/alloc.c 	u64 uninitialized_var(block);
block            7121 fs/ocfs2/alloc.c 		block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
block            7175 fs/ocfs2/alloc.c 		ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL);
block             457 fs/ocfs2/aops.c static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
block             465 fs/ocfs2/aops.c 			 (unsigned long long)block);
block             490 fs/ocfs2/aops.c 		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
block             500 fs/ocfs2/aops.c 		     (unsigned long long)block);
block              91 fs/ocfs2/buffer_head_io.c int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
block              99 fs/ocfs2/buffer_head_io.c 	trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
block             111 fs/ocfs2/buffer_head_io.c 			bhs[i] = sb_getblk(osb->sb, block++);
block             195 fs/ocfs2/buffer_head_io.c int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
block             206 fs/ocfs2/buffer_head_io.c 	trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
block             238 fs/ocfs2/buffer_head_io.c 			bhs[i] = sb_getblk(sb, block++);
block             398 fs/ocfs2/buffer_head_io.c 	trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
block              20 fs/ocfs2/buffer_head_io.h int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
block              30 fs/ocfs2/buffer_head_io.h int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
block             673 fs/ocfs2/dir.c 	unsigned long start, block, b;
block             687 fs/ocfs2/dir.c 	block = start;
block             697 fs/ocfs2/dir.c 			b = block;
block             704 fs/ocfs2/dir.c 				if (b >= nblocks || (num && block == start)) {
block             718 fs/ocfs2/dir.c 		if (ocfs2_read_dir_block(dir, block, &bh, 0)) {
block             724 fs/ocfs2/dir.c 				    block);
block             728 fs/ocfs2/dir.c 					  block << sb->s_blocksize_bits,
block             732 fs/ocfs2/dir.c 			OCFS2_I(dir)->ip_dir_start_lookup = block;
block             741 fs/ocfs2/dir.c 		if (++block >= nblocks)
block             742 fs/ocfs2/dir.c 			block = 0;
block             743 fs/ocfs2/dir.c 	} while (block != start);
block             749 fs/ocfs2/dir.c 	block = nblocks;
block             751 fs/ocfs2/dir.c 	if (block < nblocks) {
block            2687 fs/ocfs2/ocfs2_trace.h 	TP_PROTO(unsigned long long block, void *ci),
block            2688 fs/ocfs2/ocfs2_trace.h 	TP_ARGS(block, ci),
block            2690 fs/ocfs2/ocfs2_trace.h 		__field(unsigned long long, block)
block            2694 fs/ocfs2/ocfs2_trace.h 		__entry->block = block;
block            2697 fs/ocfs2/ocfs2_trace.h 	TP_printk("%llu %p", __entry->block, __entry->ci)
block            2701 fs/ocfs2/ocfs2_trace.h 	TP_PROTO(void *ci, unsigned long long block,
block            2703 fs/ocfs2/ocfs2_trace.h 	TP_ARGS(ci, block, nr, flags),
block            2706 fs/ocfs2/ocfs2_trace.h 		__field(unsigned long long, block)
block            2712 fs/ocfs2/ocfs2_trace.h 		__entry->block = block;
block            2716 fs/ocfs2/ocfs2_trace.h 	TP_printk("%p %llu %u %d", __entry->ci, __entry->block,
block             142 fs/ocfs2/suballoc.h static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit)
block             144 fs/ocfs2/suballoc.h 	u64 group = block - (u64) bit;
block             120 fs/ocfs2/super.c 			    int block,
block            1757 fs/ocfs2/super.c 			    int block,
block            1765 fs/ocfs2/super.c 	*bh = sb_getblk(sb, block);
block             210 fs/ocfs2/uptodate.c 			sector_t block)
block             218 fs/ocfs2/uptodate.c 		if (block < item->c_block)
block             220 fs/ocfs2/uptodate.c 		else if (block > item->c_block)
block             290 fs/ocfs2/uptodate.c 				     sector_t block)
block             296 fs/ocfs2/uptodate.c 		(unsigned long long)block, ci->ci_num_cached);
block             298 fs/ocfs2/uptodate.c 	ci->ci_cache.ci_array[ci->ci_num_cached] = block;
block             308 fs/ocfs2/uptodate.c 	sector_t block = new->c_block;
block             315 fs/ocfs2/uptodate.c 		(unsigned long long)block, ci->ci_num_cached);
block             322 fs/ocfs2/uptodate.c 		if (block < tmp->c_block)
block             324 fs/ocfs2/uptodate.c 		else if (block > tmp->c_block)
block             329 fs/ocfs2/uptodate.c 			     (unsigned long long) block);
block             387 fs/ocfs2/uptodate.c 					sector_t block,
block             397 fs/ocfs2/uptodate.c 		(unsigned long long)block, expand_tree);
block             404 fs/ocfs2/uptodate.c 	new->c_block = block;
block             425 fs/ocfs2/uptodate.c 		ocfs2_append_cache_array(ci, block);
block             558 fs/ocfs2/uptodate.c 					  sector_t block)
block             566 fs/ocfs2/uptodate.c 		(unsigned long long) block, ci->ci_num_cached,
block             570 fs/ocfs2/uptodate.c 		index = ocfs2_search_cache_array(ci, block);
block             574 fs/ocfs2/uptodate.c 		item = ocfs2_search_cache_tree(ci, block);
block             592 fs/ocfs2/uptodate.c 	sector_t block = bh->b_blocknr;
block             594 fs/ocfs2/uptodate.c 	ocfs2_remove_block_from_cache(ci, block);
block             599 fs/ocfs2/uptodate.c 					    sector_t block,
block             605 fs/ocfs2/uptodate.c 	for (i = 0; i < b_len; i++, block++)
block             606 fs/ocfs2/uptodate.c 		ocfs2_remove_block_from_cache(ci, block);
block              65 fs/ocfs2/uptodate.h 					    sector_t block,
block             819 fs/ocfs2/xattr.c 	u64 block;
block             846 fs/ocfs2/xattr.c 		block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
block             848 fs/ocfs2/xattr.c 						       block, alloc_size);
block            1724 fs/ocfs2/xattr.c 	int block, block_offset;
block            1727 fs/ocfs2/xattr.c 	block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
block            1730 fs/ocfs2/xattr.c 	return bucket_block(bucket, block) + block_offset;
block            2481 fs/ocfs2/xattr.c 				  u64 block,
block            2495 fs/ocfs2/xattr.c 	ret = ocfs2_read_xattr_block(inode, block, &blk_bh);
block            5108 fs/ocfs2/xattr.c 	u64 block;
block            5137 fs/ocfs2/xattr.c 	block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
block            5138 fs/ocfs2/xattr.c 	trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits);
block            5140 fs/ocfs2/xattr.c 	if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
block            5159 fs/ocfs2/xattr.c 						       block,
block            5169 fs/ocfs2/xattr.c 	trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block,
block            5171 fs/ocfs2/xattr.c 	ret = ocfs2_insert_extent(handle, &et, v_start, block,
block              91 fs/omfs/bitmap.c int omfs_allocate_block(struct super_block *sb, u64 block)
block             100 fs/omfs/bitmap.c 	tmp = block;
block             175 fs/omfs/bitmap.c int omfs_clear_range(struct super_block *sb, u64 block, int count)
block             183 fs/omfs/bitmap.c 	tmp = block;
block              34 fs/omfs/dir.c  static struct buffer_head *omfs_scan_list(struct inode *dir, u64 block,
block              43 fs/omfs/dir.c  	while (block != ~0) {
block              44 fs/omfs/dir.c  		bh = omfs_bread(dir->i_sb, block);
block              51 fs/omfs/dir.c  		if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, block)) {
block              59 fs/omfs/dir.c  		*prev_block = block;
block              60 fs/omfs/dir.c  		block = be64_to_cpu(oi->i_sibling);
block              72 fs/omfs/dir.c  	u64 block, dummy;
block              78 fs/omfs/dir.c  	block = be64_to_cpu(*((__be64 *) &bh->b_data[ofs]));
block              81 fs/omfs/dir.c  	return omfs_scan_list(dir, block, name, namelen, &dummy);
block             118 fs/omfs/dir.c  	u64 block;
block             128 fs/omfs/dir.c  	block = be64_to_cpu(*entry);
block             141 fs/omfs/dir.c  	oi->i_sibling = cpu_to_be64(block);
block             165 fs/omfs/dir.c  	u64 block, prev;
block             175 fs/omfs/dir.c  	block = be64_to_cpu(*entry);
block             177 fs/omfs/dir.c  	bh2 = omfs_scan_list(dir, block, name, namelen, &prev);
block             192 fs/omfs/file.c 			sector_t block, int count, int *left)
block             200 fs/omfs/file.c 		if (block >= searched  &&
block             201 fs/omfs/file.c 		    block < searched + numblocks) {
block             206 fs/omfs/file.c 			*left = numblocks - (block - searched);
block             209 fs/omfs/file.c 				block - searched;
block             217 fs/omfs/file.c static int omfs_get_block(struct inode *inode, sector_t block,
block             253 fs/omfs/file.c 		offset = find_block(inode, entry, block, extent_count, &remain);
block             333 fs/omfs/file.c static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
block             335 fs/omfs/file.c 	return generic_block_bmap(mapping, block, omfs_get_block);
block              24 fs/omfs/inode.c struct buffer_head *omfs_bread(struct super_block *sb, sector_t block)
block              27 fs/omfs/inode.c 	if (block >= sbi->s_num_blocks)
block              30 fs/omfs/inode.c 	return sb_bread(sb, clus_to_blk(sbi, block));
block             344 fs/omfs/inode.c 	sector_t block;
block             357 fs/omfs/inode.c 	block = clus_to_blk(sbi, sbi->s_bitmap_ino);
block             358 fs/omfs/inode.c 	if (block >= sbi->s_num_blocks)
block             363 fs/omfs/inode.c 		bh = sb_bread(sb, block++);
block              30 fs/omfs/omfs.h static inline sector_t clus_to_blk(struct omfs_sb_info *sbi, sector_t block)
block              32 fs/omfs/omfs.h 	return block << sbi->s_block_shift;
block              42 fs/omfs/omfs.h extern int omfs_allocate_block(struct super_block *sb, u64 block);
block              45 fs/omfs/omfs.h extern int omfs_clear_range(struct super_block *sb, u64 block, int count);
block              62 fs/omfs/omfs.h extern struct buffer_head *omfs_bread(struct super_block *sb, sector_t block);
block              65 fs/omfs/omfs.h extern int omfs_reserve_block(struct super_block *sb, sector_t block);
block             127 fs/pstore/ram_core.c 	uint8_t *block;
block             136 fs/pstore/ram_core.c 	block = buffer->data + (start & ~(ecc_block_size - 1));
block             140 fs/pstore/ram_core.c 		if (block + ecc_block_size > buffer_end)
block             141 fs/pstore/ram_core.c 			size = buffer_end - block;
block             142 fs/pstore/ram_core.c 		persistent_ram_encode_rs8(prz, block, size, par);
block             143 fs/pstore/ram_core.c 		block += ecc_block_size;
block             145 fs/pstore/ram_core.c 	} while (block < buffer->data + start + count);
block             162 fs/pstore/ram_core.c 	uint8_t *block;
block             168 fs/pstore/ram_core.c 	block = buffer->data;
block             170 fs/pstore/ram_core.c 	while (block < buffer->data + buffer_size(prz)) {
block             173 fs/pstore/ram_core.c 		if (block + size > buffer->data + prz->buffer_size)
block             174 fs/pstore/ram_core.c 			size = buffer->data + prz->buffer_size - block;
block             175 fs/pstore/ram_core.c 		numerr = persistent_ram_decode_rs8(prz, block, size, par);
block             177 fs/pstore/ram_core.c 			pr_devel("error in block %p, %d\n", block, numerr);
block             180 fs/pstore/ram_core.c 			pr_devel("uncorrectable error in block %p\n", block);
block             183 fs/pstore/ram_core.c 		block += prz->ecc_info.block_size;
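The fs/pstore/ram_core.c entries above walk a persistent RAM buffer in ECC-block-sized steps: the start of the dirty range is rounded down with start & ~(ecc_block_size - 1) and the final chunk is clamped to the end of the buffer. Below is a user-space sketch of that walk, with the encode step reduced to a printout and ecc_block_size assumed (as the masking requires) to be a power of two.

/* User-space sketch of the ECC block walk shown above. */
#include <stdio.h>
#include <stddef.h>

static void walk_ecc_blocks(size_t buf_size, size_t start, size_t count,
			    size_t ecc_block_size)
{
	size_t pos = start & ~(ecc_block_size - 1);	/* align down */
	size_t end = start + count;

	while (pos < end) {
		size_t size = ecc_block_size;

		if (pos + size > buf_size)		/* clamp the last block */
			size = buf_size - pos;
		printf("would encode %zu bytes at offset %zu\n", size, pos);
		pos += ecc_block_size;
	}
}

int main(void)
{
	/* a 10-byte write at offset 30 of a 100-byte buffer touches the
	 * 16-byte ECC blocks starting at offsets 16 and 32 */
	walk_ecc_blocks(100, 30, 10, 16);
	return 0;
}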
block              87 fs/qnx4/inode.c 	u32 block = try_extent(&qnx4_inode->di_first_xtnt, &offset);
block              89 fs/qnx4/inode.c 	if (block) {
block             109 fs/qnx4/inode.c 			block = try_extent(&xblk->xblk_xtnts[ix], &offset);
block             110 fs/qnx4/inode.c 			if (block) {
block             125 fs/qnx4/inode.c 	QNX4DEBUG((KERN_INFO "qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
block             126 fs/qnx4/inode.c 	return block;
block             254 fs/qnx4/inode.c static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
block             256 fs/qnx4/inode.c 	return generic_block_bmap(mapping,block,qnx4_get_block);
block             267 fs/qnx4/inode.c 	int block;
block             288 fs/qnx4/inode.c 	block = ino / QNX4_INODES_PER_BLOCK;
block             290 fs/qnx4/inode.c 	if (!(bh = sb_bread(sb, block))) {
block              60 fs/qnx4/namei.c 	unsigned long block, offset, blkofs;
block              65 fs/qnx4/namei.c 	block = offset = blkofs = 0;
block              68 fs/qnx4/namei.c 			block = qnx4_block_map(dir, blkofs);
block              69 fs/qnx4/namei.c 			if (block)
block              70 fs/qnx4/namei.c 				bh = sb_bread(dir->i_sb, block);
block              78 fs/qnx4/namei.c 			*ino = block * QNX4_INODES_PER_BLOCK +
block              31 fs/qnx4/qnx4.h extern int qnx4_is_free(struct super_block *sb, long block);
block              64 fs/qnx6/inode.c static unsigned qnx6_get_devblock(struct super_block *sb, __fs32 block)
block              67 fs/qnx6/inode.c 	return fs32_to_cpu(sbi, block) + sbi->s_blks_off;
block             117 fs/qnx6/inode.c 	unsigned block = 0;
block             135 fs/qnx6/inode.c 	block = qnx6_get_devblock(s, ei->di_block_ptr[levelptr]);
block             138 fs/qnx6/inode.c 		bh = sb_bread(s, block);
block             140 fs/qnx6/inode.c 			pr_err("Error reading block (%u)\n", block);
block             150 fs/qnx6/inode.c 		block = qnx6_get_devblock(s, ptr);
block             153 fs/qnx6/inode.c 	return block;
block             496 fs/qnx6/inode.c static sector_t qnx6_bmap(struct address_space *mapping, sector_t block)
block             498 fs/qnx6/inode.c 	return generic_block_bmap(mapping, block, qnx6_get_block);
block              49 fs/reiserfs/bitmap.c 				   b_blocknr_t block,
block              57 fs/reiserfs/bitmap.c 	*bmap_nr = block >> (s->s_blocksize_bits + 3);
block              59 fs/reiserfs/bitmap.c 	*offset = block & ((s->s_blocksize << 3) - 1);
block              62 fs/reiserfs/bitmap.c int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
block              67 fs/reiserfs/bitmap.c 	if (block == 0 || block >= SB_BLOCK_COUNT(s)) {
block              70 fs/reiserfs/bitmap.c 			       block, SB_BLOCK_COUNT(s));
block              74 fs/reiserfs/bitmap.c 	get_bit_address(s, block, &bmap, &offset);
block              83 fs/reiserfs/bitmap.c 		if (block >= bmap1 &&
block              84 fs/reiserfs/bitmap.c 		    block <= bmap1 + bmap_count) {
block              87 fs/reiserfs/bitmap.c 				       block, bmap_count);
block              94 fs/reiserfs/bitmap.c 				       block, bmap_count);
block             102 fs/reiserfs/bitmap.c 			       block, bmap);
block             106 fs/reiserfs/bitmap.c 	if (bit_value == 0 && block == SB_ROOT_BLOCK(s)) {
block             417 fs/reiserfs/bitmap.c 				 struct inode *inode, b_blocknr_t block,
block             432 fs/reiserfs/bitmap.c 	get_bit_address(s, block, &nr, &offset);
block             436 fs/reiserfs/bitmap.c 			       block);
block             449 fs/reiserfs/bitmap.c 			       "block %lu: bit already cleared", block);
block             468 fs/reiserfs/bitmap.c 			 struct inode *inode, b_blocknr_t block,
block             475 fs/reiserfs/bitmap.c 	if (!is_reusable(s, block, 1))
block             478 fs/reiserfs/bitmap.c 	if (block > sb_block_count(REISERFS_SB(s)->s_rs)) {
block             482 fs/reiserfs/bitmap.c 			       block, sb_block_count(REISERFS_SB(s)->s_rs));
block             486 fs/reiserfs/bitmap.c 	journal_mark_freed(th, s, block);
block             487 fs/reiserfs/bitmap.c 	_reiserfs_free_block(th, inode, block, for_unformatted);
block             492 fs/reiserfs/bitmap.c 					 struct inode *inode, b_blocknr_t block)
block             497 fs/reiserfs/bitmap.c 	if (!is_reusable(th->t_super, block, 1))
block             499 fs/reiserfs/bitmap.c 	_reiserfs_free_block(th, inode, block, 1);
block             952 fs/reiserfs/bitmap.c 	return hint->block ==
block            1163 fs/reiserfs/bitmap.c 					   !hint->formatted_node, hint->block);
block            1418 fs/reiserfs/bitmap.c 	b_blocknr_t block = (sb->s_blocksize << 3) * bitmap;
block            1428 fs/reiserfs/bitmap.c 		block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
block            1430 fs/reiserfs/bitmap.c 		block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
block            1432 fs/reiserfs/bitmap.c 	bh = sb_bread(sb, block);
block            1435 fs/reiserfs/bitmap.c 		                 "reading failed", __func__, block);
block            1609 fs/reiserfs/do_balan.c int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);
block             232 fs/reiserfs/inode.c 					b_blocknr_t block, struct inode *inode)
block             234 fs/reiserfs/inode.c 	map_bh(bh, inode->i_sb, block);
block             241 fs/reiserfs/inode.c static int file_capable(struct inode *inode, sector_t block)
block             246 fs/reiserfs/inode.c 	    block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
block             285 fs/reiserfs/inode.c static int _get_block_create_0(struct inode *inode, sector_t block,
block             302 fs/reiserfs/inode.c 		     (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
block             472 fs/reiserfs/inode.c static int reiserfs_bmap(struct inode *inode, sector_t block,
block             475 fs/reiserfs/inode.c 	if (!file_capable(inode, block))
block             480 fs/reiserfs/inode.c 	_get_block_create_0(inode, block, bh_result, 0);
block             503 fs/reiserfs/inode.c static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
block             507 fs/reiserfs/inode.c 	return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
block             640 fs/reiserfs/inode.c 				  sector_t block,
block             650 fs/reiserfs/inode.c 						  path, block);
block             654 fs/reiserfs/inode.c 					 block);
block             657 fs/reiserfs/inode.c int reiserfs_get_block(struct inode *inode, sector_t block,
block             686 fs/reiserfs/inode.c 	    (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
block             691 fs/reiserfs/inode.c 	if (!file_capable(inode, block)) {
block             703 fs/reiserfs/inode.c 		ret = _get_block_create_0(inode, block, bh_result,
block             764 fs/reiserfs/inode.c 		    _allocate_block(th, block, inode, &allocated_block_nr,
block             778 fs/reiserfs/inode.c 			    _allocate_block(th, block, inode,
block            2370 fs/reiserfs/inode.c 				   unsigned long block)
block            2382 fs/reiserfs/inode.c 	loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1;
block            2496 fs/reiserfs/inode.c 		retval = reiserfs_get_block(inode, block, bh_result,
block            2533 fs/reiserfs/inode.c 	unsigned long block;
block            2579 fs/reiserfs/inode.c 	block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
block            2583 fs/reiserfs/inode.c 		if (block > last_block) {
block            2600 fs/reiserfs/inode.c 			if ((error = map_block_for_writepage(inode, bh, block))) {
block            2605 fs/reiserfs/inode.c 		block++;
block            2890 fs/reiserfs/inode.c static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)
block            2892 fs/reiserfs/inode.c 	return generic_block_bmap(as, block, reiserfs_bmap);
block             212 fs/reiserfs/journal.c 				  b_blocknr_t block,
block             215 fs/reiserfs/journal.c 	unsigned int bmap_nr = block / (sb->s_blocksize << 3);
block             216 fs/reiserfs/journal.c 	unsigned int bit_nr = block % (sb->s_blocksize << 3);
block            1843 fs/reiserfs/journal.c 			 unsigned long block, int remove_freed)
block            1848 fs/reiserfs/journal.c 	head = &(journal_hash(table, sb, block));
block            1854 fs/reiserfs/journal.c 		if (cur->blocknr == block && cur->sb == sb
block            2321 fs/reiserfs/journal.c 					   b_blocknr_t block, int bufsize,
block            2329 fs/reiserfs/journal.c 	bh = __getblk(dev, block, bufsize);
block            2333 fs/reiserfs/journal.c 	if (block + BUFNR > max_block) {
block            2334 fs/reiserfs/journal.c 		blocks = max_block - block;
block            2339 fs/reiserfs/journal.c 		bh = __getblk(dev, block + i, bufsize);
block            1098 fs/reiserfs/reiserfs.h #define is_block_in_log_or_reserved_area(s, block) \
block            1099 fs/reiserfs/reiserfs.h          block >= SB_JOURNAL_1st_RESERVED_BLOCK(s) \
block            1100 fs/reiserfs/reiserfs.h          && block < SB_JOURNAL_1st_RESERVED_BLOCK(s) +  \
block            2809 fs/reiserfs/reiserfs.h #define _jhashfn(sb,block)	\
block            2811 fs/reiserfs/reiserfs.h 	 (((block)<<(JBH_HASH_SHIFT - 6)) ^ ((block) >> 13) ^ ((block) << (JBH_HASH_SHIFT - 12))))
block            2812 fs/reiserfs/reiserfs.h #define journal_hash(t,sb,block) ((t)[_jhashfn((sb),(block)) & JBH_HASH_MASK])
block            2815 fs/reiserfs/reiserfs.h #define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
block            2816 fs/reiserfs/reiserfs.h #define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
block            2817 fs/reiserfs/reiserfs.h #define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
block            3071 fs/reiserfs/reiserfs.h int reiserfs_get_block(struct inode *inode, sector_t block,
block            3270 fs/reiserfs/reiserfs.h 	sector_t block;		/* file offset, in blocks */
block            3325 fs/reiserfs/reiserfs.h int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);
block            3339 fs/reiserfs/reiserfs.h 		.block = 0,
block            3350 fs/reiserfs/reiserfs.h 					    sector_t block)
block            3356 fs/reiserfs/reiserfs.h 		.block = block,
block            3368 fs/reiserfs/reiserfs.h 					     sector_t block)
block            3374 fs/reiserfs/reiserfs.h 		.block = block,
block            1082 fs/reiserfs/stree.c 		    __u32 block;
block            1095 fs/reiserfs/stree.c 		    block = get_block_num(unfm, 0);
block            1097 fs/reiserfs/stree.c 		    if (block != 0) {
block            1101 fs/reiserfs/stree.c 			reiserfs_free_block(th, inode, block, 1);
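The fs/reiserfs/bitmap.c entries above show how a block number is split into a bitmap index and a bit offset: bmap_nr = block >> (blocksize_bits + 3) and offset = block & ((blocksize << 3) - 1), i.e. each on-disk bitmap block covers blocksize * 8 blocks. A stand-alone version of that calculation, assuming only that the block size is a power of two:

/* Stand-alone version of the bitmap addressing shown above. */
#include <stdio.h>

static void get_bit_address(unsigned blocksize_bits, unsigned long block,
			    unsigned *bmap_nr, unsigned *offset)
{
	unsigned long blocksize = 1UL << blocksize_bits;

	*bmap_nr = block >> (blocksize_bits + 3);	/* which bitmap block */
	*offset  = block & ((blocksize << 3) - 1);	/* which bit inside it */
}

int main(void)
{
	unsigned bmap, bit;

	/* with 4 KiB blocks one bitmap block tracks 32768 blocks, so
	 * block 40000 lives in bitmap 1 at bit 7232 */
	get_bit_address(12, 40000, &bmap, &bit);
	printf("bitmap %u, bit %u\n", bmap, bit);
	return 0;
}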
block              53 fs/squashfs/cache.c 	struct squashfs_cache *cache, u64 block, int length)
block              62 fs/squashfs/cache.c 			if (cache->entry[i].block == block) {
block             103 fs/squashfs/cache.c 			entry->block = block;
block             110 fs/squashfs/cache.c 			entry->length = squashfs_read_data(sb, block, length,
block             161 fs/squashfs/cache.c 		cache->name, i, entry->block, entry->refcount, entry->error);
block             165 fs/squashfs/cache.c 							block);
block             257 fs/squashfs/cache.c 		entry->block = SQUASHFS_INVALID_BLK;
block             332 fs/squashfs/cache.c 		u64 *block, int *offset, int length)
block             338 fs/squashfs/cache.c 	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
block             344 fs/squashfs/cache.c 		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
block             360 fs/squashfs/cache.c 			*block = entry->next_index;
block             406 fs/squashfs/cache.c void *squashfs_read_table(struct super_block *sb, u64 block, int length)
block             432 fs/squashfs/cache.c 	res = squashfs_read_data(sb, block, length |
block             101 fs/squashfs/dir.c 	u64 block = squashfs_i(inode)->start + msblk->directory_table;
block             107 fs/squashfs/dir.c 	TRACE("Entered squashfs_readdir [%llx:%x]\n", block, offset);
block             144 fs/squashfs/dir.c 	length = get_dir_index_using_offset(inode->i_sb, &block, &offset,
block             154 fs/squashfs/dir.c 		err = squashfs_read_metadata(inode->i_sb, &dirh, &block,
block             170 fs/squashfs/dir.c 			err = squashfs_read_metadata(inode->i_sb, dire, &block,
block             182 fs/squashfs/dir.c 					&block, &offset, size);
block             213 fs/squashfs/dir.c 	ERROR("Unable to read directory block [%llx:%x]\n", block, offset);
block             164 fs/squashfs/file.c 	long long block = 0;
block             189 fs/squashfs/file.c 			block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
block             195 fs/squashfs/file.c 	return block;
block             326 fs/squashfs/file.c static int read_blocklist(struct inode *inode, int index, u64 *block)
block             332 fs/squashfs/file.c 	int res = fill_meta_index(inode, index, &start, &offset, block);
block             336 fs/squashfs/file.c 			*block);
block             351 fs/squashfs/file.c 		*block += blks;
block             468 fs/squashfs/file.c 		u64 block = 0;
block             469 fs/squashfs/file.c 		int bsize = read_blocklist(inode, index, &block);
block             476 fs/squashfs/file.c 			res = squashfs_readpage_block(page, block, bsize, expected);
block              21 fs/squashfs/file_cache.c int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
block              25 fs/squashfs/file_cache.c 		block, bsize);
block              29 fs/squashfs/file_cache.c 		ERROR("Unable to read page, block %llx, size %x\n", block,
block              21 fs/squashfs/file_direct.c static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
block              25 fs/squashfs/file_direct.c int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
block              84 fs/squashfs/file_direct.c 		res = squashfs_read_cache(target_page, block, bsize, pages,
block              93 fs/squashfs/file_direct.c 	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
block             144 fs/squashfs/file_direct.c static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
block             149 fs/squashfs/file_direct.c 						 block, bsize);
block             153 fs/squashfs/file_direct.c 		ERROR("Unable to read page, block %llx, size %x\n", block,
block              39 fs/squashfs/fragment.c 	int block, offset, size;
block              45 fs/squashfs/fragment.c 	block = SQUASHFS_FRAGMENT_INDEX(fragment);
block              48 fs/squashfs/fragment.c 	start_block = le64_to_cpu(msblk->fragment_index[block]);
block              36 fs/squashfs/id.c 	int block = SQUASHFS_ID_BLOCK(index);
block              38 fs/squashfs/id.c 	u64 start_block = le64_to_cpu(msblk->id_table[block]);
block             104 fs/squashfs/inode.c 	u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
block             115 fs/squashfs/inode.c 	err = squashfs_read_metadata(sb, sqshb_ino, &block,
block             124 fs/squashfs/inode.c 	block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
block             135 fs/squashfs/inode.c 		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
block             163 fs/squashfs/inode.c 		squashfs_i(inode)->block_list_start = block;
block             169 fs/squashfs/inode.c 			offset, squashfs_i(inode)->start, block, offset);
block             178 fs/squashfs/inode.c 		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
block             210 fs/squashfs/inode.c 		squashfs_i(inode)->block_list_start = block;
block             216 fs/squashfs/inode.c 			offset, squashfs_i(inode)->start, block, offset);
block             222 fs/squashfs/inode.c 		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
block             246 fs/squashfs/inode.c 		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
block             259 fs/squashfs/inode.c 		squashfs_i(inode)->dir_idx_start = block;
block             274 fs/squashfs/inode.c 		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
block             285 fs/squashfs/inode.c 		squashfs_i(inode)->start = block;
block             291 fs/squashfs/inode.c 			err = squashfs_read_metadata(sb, NULL, &block,
block             295 fs/squashfs/inode.c 			err = squashfs_read_metadata(sb, &xattr, &block,
block             304 fs/squashfs/inode.c 				block, offset);
block             312 fs/squashfs/inode.c 		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
block             334 fs/squashfs/inode.c 		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
block             357 fs/squashfs/inode.c 		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
block             374 fs/squashfs/inode.c 		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
block             135 fs/squashfs/namei.c 	u64 block = squashfs_i(dir)->start + msblk->directory_table;
block             140 fs/squashfs/namei.c 	TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset);
block             153 fs/squashfs/namei.c 	length = get_dir_index_using_name(dir->i_sb, &block, &offset,
block             162 fs/squashfs/namei.c 		err = squashfs_read_metadata(dir->i_sb, &dirh, &block,
block             178 fs/squashfs/namei.c 			err = squashfs_read_metadata(dir->i_sb, dire, &block,
block             190 fs/squashfs/namei.c 					&block, &offset, size);
block              30 fs/squashfs/squashfs_fs_sb.h 	u64			block;
block              39 fs/squashfs/symlink.c 	u64 block = squashfs_i(inode)->start;
block              47 fs/squashfs/symlink.c 			"%llx, offset %x\n", page->index, block, offset);
block              53 fs/squashfs/symlink.c 		bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
block              71 fs/squashfs/symlink.c 		entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
block              86 fs/squashfs/symlink.c 			block = entry->next_index;
block              32 fs/squashfs/xattr_id.c 	int block = SQUASHFS_XATTR_BLOCK(index);
block              34 fs/squashfs/xattr_id.c 	u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
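Most of the fs/squashfs entries above pass a (block, offset) pair by reference into squashfs_read_metadata(), which advances the pair as variable-length records are consumed and rolls over into the next metadata block. The sketch below is only a loose user-space analogue of that cursor over a flat array of fixed-size chunks; the 8 KiB chunk size and the simple block+1 rollover are illustrative assumptions, not the on-disk format, which chains blocks via next_index as the cache lines above show.

/* Loose analogue of the squashfs (block, offset) metadata cursor. */
#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 8192			/* assumed metadata chunk size */

struct meta_cursor {
	unsigned char (*chunks)[CHUNK_SIZE];	/* "decompressed" blocks */
	unsigned nr_chunks;
	unsigned block;				/* current chunk */
	unsigned offset;			/* offset inside it */
};

static int read_metadata(struct meta_cursor *c, void *buf, unsigned length)
{
	unsigned char *out = buf;

	while (length) {
		unsigned avail = CHUNK_SIZE - c->offset;
		unsigned n = length < avail ? length : avail;

		if (c->block >= c->nr_chunks)
			return -1;		/* ran off the table */
		memcpy(out, c->chunks[c->block] + c->offset, n);
		out += n;
		length -= n;
		c->offset += n;
		if (c->offset == CHUNK_SIZE) {	/* roll into the next block */
			c->block++;
			c->offset = 0;
		}
	}
	return 0;
}

int main(void)
{
	static unsigned char chunks[2][CHUNK_SIZE];
	struct meta_cursor c = { .chunks = chunks, .nr_chunks = 2 };
	unsigned char rec[16];

	chunks[0][CHUNK_SIZE - 1] = 0xab;	/* record straddles two chunks */
	chunks[1][0] = 0xcd;
	c.offset = CHUNK_SIZE - 1;
	if (read_metadata(&c, rec, 2) == 0)
		printf("%02x %02x, cursor now %u:%u\n",
		       rec[0], rec[1], c.block, c.offset);
	return 0;
}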
block              49 fs/sysv/balloc.c 	unsigned block = fs32_to_cpu(sbi, nr);
block              59 fs/sysv/balloc.c 	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
block              77 fs/sysv/balloc.c 		block += sbi->s_block_base;
block              78 fs/sysv/balloc.c 		bh = sb_getblk(sb, block);
block             103 fs/sysv/balloc.c 	unsigned int block;
block             117 fs/sysv/balloc.c 	block = fs32_to_cpu(sbi, nr);
block             121 fs/sysv/balloc.c 	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
block             123 fs/sysv/balloc.c 			block);
block             130 fs/sysv/balloc.c 		block += sbi->s_block_base;
block             131 fs/sysv/balloc.c 		if (!(bh = sb_bread(sb, block))) {
block             166 fs/sysv/balloc.c 	unsigned block;
block             197 fs/sysv/balloc.c 		block = fs32_to_cpu(sbi, zone);
block             201 fs/sysv/balloc.c 		if (block < sbi->s_firstdatazone || block >= sbi->s_nzones)
block             203 fs/sysv/balloc.c 		block += sbi->s_block_base;
block             204 fs/sysv/balloc.c 		bh = sb_bread(sb, block);
block             220 fs/sysv/balloc.c 		block);
block              61 fs/sysv/ialloc.c 	int block = sbi->s_firstinodezone + sbi->s_block_base;
block              63 fs/sysv/ialloc.c 	block += (ino-1) >> sbi->s_inodes_per_block_bits;
block              64 fs/sysv/ialloc.c 	*bh = sb_bread(sb, block);
block             178 fs/sysv/inode.c 	unsigned int block;
block             213 fs/sysv/inode.c 	for (block = 0; block < 10+1+1+1; block++)
block             214 fs/sysv/inode.c 		read3byte(sbi, &raw_inode->i_data[3*block],
block             215 fs/sysv/inode.c 				(u8 *)&si->i_data[block]);
block             238 fs/sysv/inode.c 	unsigned int ino, block;
block             265 fs/sysv/inode.c 	for (block = 0; block < 10+1+1+1; block++)
block             266 fs/sysv/inode.c 		write3byte(sbi, (u8 *)&si->i_data[block],
block             267 fs/sysv/inode.c 			&raw_inode->i_data[3*block]);
block              23 fs/sysv/itree.c static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
block              32 fs/sysv/itree.c 	if (block < 0) {
block              34 fs/sysv/itree.c 	} else if (block < DIRECT) {
block              35 fs/sysv/itree.c 		offsets[n++] = block;
block              36 fs/sysv/itree.c 	} else if ( (block -= DIRECT) < indirect_blocks) {
block              38 fs/sysv/itree.c 		offsets[n++] = block;
block              39 fs/sysv/itree.c 	} else if ((block -= indirect_blocks) < double_blocks) {
block              41 fs/sysv/itree.c 		offsets[n++] = block >> ptrs_bits;
block              42 fs/sysv/itree.c 		offsets[n++] = block & (indirect_blocks - 1);
block              43 fs/sysv/itree.c 	} else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) {
block              45 fs/sysv/itree.c 		offsets[n++] = block >> (ptrs_bits * 2);
block              46 fs/sysv/itree.c 		offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1);
block              47 fs/sysv/itree.c 		offsets[n++] = block & (indirect_blocks - 1);
block             103 fs/sysv/itree.c 		int block = block_to_cpu(SYSV_SB(sb), p->key);
block             104 fs/sysv/itree.c 		bh = sb_bread(sb, block);
block             345 fs/sysv/itree.c 			int block;
block             350 fs/sysv/itree.c 			block = block_to_cpu(SYSV_SB(sb), nr);
block             351 fs/sysv/itree.c 			bh = sb_bread(sb, block);
block             492 fs/sysv/itree.c static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
block             494 fs/sysv/itree.c 	return generic_block_bmap(mapping,block,get_block);
block             274 fs/sysv/super.c 	int block;
block             377 fs/sysv/super.c 		bh = sb_bread(sb, flavours[i].block);
block              28 fs/ubifs/crypto.c 		  unsigned int in_len, unsigned int *out_len, int block)
block              43 fs/ubifs/crypto.c 					    offset_in_page(p), block, GFP_NOFS);
block              54 fs/ubifs/crypto.c 		  unsigned int *out_len, int block)
block              69 fs/ubifs/crypto.c 					    block);
block              45 fs/ubifs/file.c static int read_block(struct inode *inode, void *addr, unsigned int block,
block              53 fs/ubifs/file.c 	data_key_init(c, &key, inode->i_ino, block);
block              71 fs/ubifs/file.c 		err = ubifs_decrypt(inode, dn, &dlen, block);
block              94 fs/ubifs/file.c 		  block, inode->i_ino);
block             103 fs/ubifs/file.c 	unsigned int block, beyond;
block             116 fs/ubifs/file.c 	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
block             118 fs/ubifs/file.c 	if (block >= beyond) {
block             135 fs/ubifs/file.c 		if (block >= beyond) {
block             140 fs/ubifs/file.c 			ret = read_block(inode, addr, block, dn);
block             145 fs/ubifs/file.c 			} else if (block + 1 == beyond) {
block             155 fs/ubifs/file.c 		block += 1;
block             905 fs/ubifs/file.c 	unsigned int block;
block             922 fs/ubifs/file.c 	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
block             926 fs/ubifs/file.c 		data_key_init(c, &key, inode->i_ino, block);
block             932 fs/ubifs/file.c 		block += 1;
block            1438 fs/ubifs/journal.c 			      unsigned int block, struct ubifs_data_node *dn,
block            1453 fs/ubifs/journal.c 		err = ubifs_decrypt(inode, dn, &dlen, block);
block            1469 fs/ubifs/journal.c 		err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
block             269 fs/ubifs/key.h 				 unsigned int block)
block             271 fs/ubifs/key.h 	ubifs_assert(c, !(block & ~UBIFS_S_KEY_BLOCK_MASK));
block             273 fs/ubifs/key.h 	key->u32[1] = block | (UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS);
block            1519 fs/ubifs/tnc.c 	unsigned int block = key_block(c, &bu->key);
block            1586 fs/ubifs/tnc.c 		bu->blk_cnt += (next_block - block - 1);
block            1589 fs/ubifs/tnc.c 		block = next_block;
block            1627 fs/ubifs/tnc.c 	block = key_block(c, &bu->key) + bu->blk_cnt;
block            1628 fs/ubifs/tnc.c 	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
block            1630 fs/ubifs/tnc.c 		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
block            3483 fs/ubifs/tnc.c 	unsigned int block;
block            3490 fs/ubifs/tnc.c 	block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
block            3491 fs/ubifs/tnc.c 	data_key_init(c, &from_key, inode->i_ino, block);
block            3518 fs/ubifs/tnc.c 	block = key_block(c, key);
block            3521 fs/ubifs/tnc.c 		  ((loff_t)block) << UBIFS_BLOCK_SHIFT);
block            2074 fs/ubifs/ubifs.h 				int block)
block            2082 fs/ubifs/ubifs.h 				unsigned int *out_len, int block)
block            2091 fs/ubifs/ubifs.h 		  unsigned int in_len, unsigned int *out_len, int block);
block            2093 fs/ubifs/ubifs.h 		  unsigned int *out_len, int block);
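The fs/ubifs/key.h and fs/ubifs/file.c entries above show the data-key pattern: the page index is turned into a block number, and that block number is packed into the low bits of the second key word with the key type in the top bits. A minimal standalone sketch of that packing; the field width and type value used here are assumptions for illustration, not the on-media constants:

	/* sketch only: pack (inode, block) into a two-word data key */
	#include <stdint.h>
	#include <stdio.h>

	#define SK_BLOCK_BITS	29				/* assumed width */
	#define SK_BLOCK_MASK	((1u << SK_BLOCK_BITS) - 1)
	#define SK_DATA_KEY	1u				/* assumed type value */

	struct sketch_key { uint32_t u32[2]; };

	static void sketch_data_key_init(struct sketch_key *key,
					 uint32_t inum, uint32_t block)
	{
		/* mirrors the assert in key.h: the block must fit in its field */
		if (block & ~SK_BLOCK_MASK)
			return;
		key->u32[0] = inum;
		key->u32[1] = block | (SK_DATA_KEY << SK_BLOCK_BITS);
	}

	int main(void)
	{
		struct sketch_key key;

		sketch_data_key_init(&key, 42, 7);	/* inode 42, data block 7 */
		printf("key = %08x:%08x\n",
		       (unsigned)key.u32[0], (unsigned)key.u32[1]);
		return 0;
	}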
block              35 fs/udf/balloc.c 			     struct udf_bitmap *bitmap, unsigned int block,
block              45 fs/udf/balloc.c 	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
block             114 fs/udf/balloc.c 	unsigned long block;
block             132 fs/udf/balloc.c 	block = bloc->logicalBlockNum + offset +
block             137 fs/udf/balloc.c 		block_group = block >> (sb->s_blocksize_bits + 3);
block             138 fs/udf/balloc.c 		bit = block % (sb->s_blocksize << 3);
block             162 fs/udf/balloc.c 			block += count;
block             178 fs/udf/balloc.c 	int bit, block, block_group;
block             192 fs/udf/balloc.c 		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
block             193 fs/udf/balloc.c 		block_group = block >> (sb->s_blocksize_bits + 3);
block             200 fs/udf/balloc.c 		bit = block % (sb->s_blocksize << 3);
block             208 fs/udf/balloc.c 			block++;
block             225 fs/udf/balloc.c 	udf_pblk_t block;
block             240 fs/udf/balloc.c 	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
block             241 fs/udf/balloc.c 	block_group = block >> (sb->s_blocksize_bits + 3);
block             252 fs/udf/balloc.c 		bit = block % (sb->s_blocksize << 3);
block             391 fs/udf/balloc.c 	epos.block = oepos.block = iinfo->i_location;
block             436 fs/udf/balloc.c 			oepos.block = epos.block;
block             523 fs/udf/balloc.c 	epos.block = iinfo->i_location;
block             591 fs/udf/balloc.c 	epos.block = iinfo->i_location;
block             614 fs/udf/balloc.c 			goal_epos.block = epos.block;
block             703 fs/udf/balloc.c 	udf_pblk_t block;
block             706 fs/udf/balloc.c 		block = udf_bitmap_new_block(sb,
block             710 fs/udf/balloc.c 		block = udf_table_new_block(sb,
block             717 fs/udf/balloc.c 	if (inode && block)
block             719 fs/udf/balloc.c 	return block;
block              46 fs/udf/dir.c   	udf_pblk_t block, iblock;
block              88 fs/udf/dir.c   		block = udf_get_lb_pblock(sb, &eloc, offset);
block              99 fs/udf/dir.c   		if (!(fibh.sbh = fibh.ebh = udf_tread(sb, block))) {
block             109 fs/udf/dir.c   				block = udf_get_lb_pblock(sb, &eloc, offset + i);
block             110 fs/udf/dir.c   				tmp = udf_tgetblk(sb, block);
block              30 fs/udf/directory.c 	udf_pblk_t block;
block              62 fs/udf/directory.c 		block = udf_get_lb_pblock(dir->i_sb, eloc, *offset);
block              72 fs/udf/directory.c 		fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
block              82 fs/udf/directory.c 				block = udf_get_lb_pblock(dir->i_sb, eloc,
block              84 fs/udf/directory.c 				tmp = udf_tgetblk(dir->i_sb, block);
block             120 fs/udf/directory.c 		block = udf_get_lb_pblock(dir->i_sb, eloc, *offset);
block             132 fs/udf/directory.c 		fibh->ebh = udf_tread(dir->i_sb, block);
block              53 fs/udf/ialloc.c 	udf_pblk_t block;
block              85 fs/udf/ialloc.c 	block = udf_new_block(dir->i_sb, NULL,
block             112 fs/udf/ialloc.c 	iinfo->i_location.logicalBlockNum = block;
block             230 fs/udf/inode.c static sector_t udf_bmap(struct address_space *mapping, sector_t block)
block             232 fs/udf/inode.c 	return generic_block_bmap(mapping, block, udf_get_block);
block             327 fs/udf/inode.c 					    udf_pblk_t *block, int *err)
block             353 fs/udf/inode.c 	*block = udf_new_block(inode->i_sb, inode,
block             356 fs/udf/inode.c 	if (!(*block))
block             358 fs/udf/inode.c 	newblock = udf_get_pblock(inode->i_sb, *block,
block             386 fs/udf/inode.c 		sfi->descTag.tagLocation = cpu_to_le32(*block);
block             403 fs/udf/inode.c 	eloc.logicalBlockNum = *block;
block             408 fs/udf/inode.c 	epos.block = iinfo->i_location;
block             418 fs/udf/inode.c static int udf_get_block(struct inode *inode, sector_t block,
block             426 fs/udf/inode.c 		phys = udf_block_map(inode, block);
block             437 fs/udf/inode.c 	if (block == iinfo->i_next_alloc_block + 1) {
block             443 fs/udf/inode.c 	phys = inode_getblk(inode, block, &err, &new);
block             456 fs/udf/inode.c static struct buffer_head *udf_getblk(struct inode *inode, udf_pblk_t block,
block             464 fs/udf/inode.c 	*err = udf_get_block(inode, block, &dummy, create);
block             688 fs/udf/inode.c static sector_t inode_getblk(struct inode *inode, sector_t block,
block             709 fs/udf/inode.c 	prev_epos.block = iinfo->i_location;
block             712 fs/udf/inode.c 	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
block             731 fs/udf/inode.c 		prev_epos.block = cur_epos.block;
block             732 fs/udf/inode.c 		cur_epos.block = next_epos.block;
block             852 fs/udf/inode.c 		if (iinfo->i_next_alloc_block == block)
block             900 fs/udf/inode.c 	iinfo->i_next_alloc_block = block;
block            1202 fs/udf/inode.c struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
block            1207 fs/udf/inode.c 	bh = udf_getblk(inode, block, create, err);
block            1902 fs/udf/inode.c 	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
block            1903 fs/udf/inode.c 	struct inode *inode = iget_locked(sb, block);
block            1923 fs/udf/inode.c int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
block            1940 fs/udf/inode.c 	neloc.logicalBlockNum = block;
block            1941 fs/udf/inode.c 	neloc.partitionReferenceNum = epos->block.partitionReferenceNum;
block            1955 fs/udf/inode.c 				cpu_to_le32(epos->block.logicalBlockNum);
block            1962 fs/udf/inode.c 	udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
block            1965 fs/udf/inode.c 	nepos.block = neloc;
block            1983 fs/udf/inode.c 		udf_write_aext(inode, epos, &nepos.block,
block            1986 fs/udf/inode.c 		__udf_add_aext(inode, epos, &nepos.block,
block            2068 fs/udf/inode.c 					  epos->block.partitionReferenceNum,
block            2069 fs/udf/inode.c 					  epos->block.logicalBlockNum, &err);
block            2147 fs/udf/inode.c 		udf_pblk_t block;
block            2156 fs/udf/inode.c 		epos->block = *eloc;
block            2159 fs/udf/inode.c 		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
block            2160 fs/udf/inode.c 		epos->bh = udf_tread(inode->i_sb, block);
block            2162 fs/udf/inode.c 			udf_debug("reading block %u failed!\n", block);
block            2275 fs/udf/inode.c 			oepos.block = epos.block;
block            2286 fs/udf/inode.c 		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
block            2329 fs/udf/inode.c int8_t inode_bmap(struct inode *inode, sector_t block,
block            2334 fs/udf/inode.c 	loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
block            2341 fs/udf/inode.c 		pos->block = iinfo->i_location;
block            2361 fs/udf/inode.c udf_pblk_t udf_block_map(struct inode *inode, sector_t block)
block            2371 fs/udf/inode.c 	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
block              31 fs/udf/misc.c  struct buffer_head *udf_tgetblk(struct super_block *sb, udf_pblk_t block)
block              34 fs/udf/misc.c  		return sb_getblk(sb, udf_fixed_to_variable(block));
block              36 fs/udf/misc.c  		return sb_getblk(sb, block);
block              39 fs/udf/misc.c  struct buffer_head *udf_tread(struct super_block *sb, udf_pblk_t block)
block              42 fs/udf/misc.c  		return sb_bread(sb, udf_fixed_to_variable(block));
block              44 fs/udf/misc.c  		return sb_bread(sb, block);
block             199 fs/udf/misc.c  struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
block             207 fs/udf/misc.c  	if (block == 0xFFFFFFFF)
block             210 fs/udf/misc.c  	bh = udf_tread(sb, block);
block             213 fs/udf/misc.c  			block, location);
block             223 fs/udf/misc.c  			  block, le32_to_cpu(tag_p->tagLocation), location);
block             231 fs/udf/misc.c  			block, checksum, tag_p->tagChecksum);
block             239 fs/udf/misc.c  			le16_to_cpu(tag_p->descVersion), block);
block             250 fs/udf/misc.c  	udf_debug("Crc failure block %u: crc = %u, crclen = %u\n", block,
block             167 fs/udf/namei.c 	udf_pblk_t block;
block             195 fs/udf/namei.c 		block = udf_get_lb_pblock(sb, &eloc, offset);
block             204 fs/udf/namei.c 		fibh->sbh = fibh->ebh = udf_tread(sb, block);
block             339 fs/udf/namei.c 	udf_pblk_t block;
block             378 fs/udf/namei.c 			block = udf_get_lb_pblock(dir->i_sb,
block             383 fs/udf/namei.c 		block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
block             392 fs/udf/namei.c 		fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
block             398 fs/udf/namei.c 		block = dinfo->i_location.logicalBlockNum;
block             442 fs/udf/namei.c 				udf_expand_dir_adinicb(dir, &block, err);
block             445 fs/udf/namei.c 		epos.block = dinfo->i_location;
block             461 fs/udf/namei.c 			block = dinfo->i_location.logicalBlockNum;
block             468 fs/udf/namei.c 			block = eloc.logicalBlockNum +
block             492 fs/udf/namei.c 		block = eloc.logicalBlockNum + ((elen - 1) >>
block             501 fs/udf/namei.c 		epos.block = dinfo->i_location;
block             509 fs/udf/namei.c 			block = eloc.logicalBlockNum + ((elen - 1) >>
block             523 fs/udf/namei.c 		udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block,
block             526 fs/udf/namei.c 		udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block,
block             727 fs/udf/namei.c 	udf_pblk_t block;
block             742 fs/udf/namei.c 		block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
block             751 fs/udf/namei.c 		fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block);
block             891 fs/udf/namei.c 	udf_pblk_t block;
block             916 fs/udf/namei.c 		block = udf_new_block(sb, inode,
block             919 fs/udf/namei.c 		if (!block)
block             921 fs/udf/namei.c 		epos.block = iinfo->i_location;
block             924 fs/udf/namei.c 		eloc.logicalBlockNum = block;
block             932 fs/udf/namei.c 		block = udf_get_pblock(sb, block,
block             935 fs/udf/namei.c 		epos.bh = udf_tgetblk(sb, block);
block            1237 fs/udf/namei.c static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block,
block            1243 fs/udf/namei.c 	if (block == 0)
block            1246 fs/udf/namei.c 	loc.logicalBlockNum = block;
block            1268 fs/udf/namei.c 	return udf_nfs_get_inode(sb, fid->udf.block, fid->udf.partref,
block            1299 fs/udf/namei.c 	fid->udf.block = location.logicalBlockNum;
block              29 fs/udf/partition.c uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
block              36 fs/udf/partition.c 			  block, partition, offset);
block              41 fs/udf/partition.c 		return map->s_partition_func(sb, block, partition, offset);
block              43 fs/udf/partition.c 		return map->s_partition_root + block + offset;
block              46 fs/udf/partition.c uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
block              61 fs/udf/partition.c 	if (block > vdata->s_num_entries) {
block              63 fs/udf/partition.c 			  block, vdata->s_num_entries);
block              69 fs/udf/partition.c 			vdata->s_start_offset))[block]);
block              73 fs/udf/partition.c 	if (block >= index) {
block              74 fs/udf/partition.c 		block -= index;
block              75 fs/udf/partition.c 		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
block              76 fs/udf/partition.c 		index = block % (sb->s_blocksize / sizeof(uint32_t));
block              79 fs/udf/partition.c 		index = vdata->s_start_offset / sizeof(uint32_t) + block;
block              87 fs/udf/partition.c 			  sb, block, partition, loc, index);
block             106 fs/udf/partition.c inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
block             109 fs/udf/partition.c 	return udf_get_pblock_virt15(sb, block, partition, offset);
block             112 fs/udf/partition.c uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
block             124 fs/udf/partition.c 	packet = (block + offset) & ~(sdata->s_packet_len - 1);
block             142 fs/udf/partition.c 					((block + offset) &
block             149 fs/udf/partition.c 	return map->s_partition_root + block + offset;
block             280 fs/udf/partition.c static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
block             291 fs/udf/partition.c 	if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
block             306 fs/udf/partition.c uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
block             324 fs/udf/partition.c 	retblk = udf_try_read_meta(inode, block, partition, offset);
block             339 fs/udf/partition.c 		retblk = udf_try_read_meta(inode, block, partition, offset);
block             851 fs/udf/super.c static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
block             864 fs/udf/super.c 	bh = udf_read_tagged(sb, block, block, &ident);
block            1232 fs/udf/super.c static int udf_load_partdesc(struct super_block *sb, sector_t block)
block            1243 fs/udf/super.c 	bh = udf_read_tagged(sb, block, block, &ident);
block            1378 fs/udf/super.c static int udf_load_logicalvol(struct super_block *sb, sector_t block,
block            1391 fs/udf/super.c 	bh = udf_read_tagged(sb, block, block, &ident);
block            1657 fs/udf/super.c 		sector_t block, sector_t lastblock,
block            1685 fs/udf/super.c 	for (; (!done && block <= lastblock); block++) {
block            1686 fs/udf/super.c 		bh = udf_read_tagged(sb, block, block, &ident);
block            1704 fs/udf/super.c 			block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
block            1708 fs/udf/super.c 			lastblock += block - 1;
block            1710 fs/udf/super.c 			block--;
block            1727 fs/udf/super.c 				curr->block = block;
block            1740 fs/udf/super.c 	if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
block            1744 fs/udf/super.c 	ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
block            1748 fs/udf/super.c 	if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
block            1750 fs/udf/super.c 				data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
block            1758 fs/udf/super.c 		ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
block            1815 fs/udf/super.c static int udf_check_anchor_block(struct super_block *sb, sector_t block,
block            1823 fs/udf/super.c 	    udf_fixed_to_variable(block) >=
block            1827 fs/udf/super.c 	bh = udf_read_tagged(sb, block, block, &ident);
block            2421 fs/udf/super.c 	udf_pblk_t block = 0, newblock;
block            2453 fs/udf/super.c 			newblock = udf_get_lb_pblock(sb, &loc, ++block);
block            2478 fs/udf/super.c 	epos.block = UDF_I(table)->i_location;
block             142 fs/udf/truncate.c 	epos.block = iinfo->i_location;
block             250 fs/udf/truncate.c 				udf_free_blocks(sb, NULL, &epos.block,
block             260 fs/udf/truncate.c 			epos.block = eloc;
block             280 fs/udf/truncate.c 		udf_free_blocks(sb, NULL, &epos.block, 0, indirect_ext_len);
block               8 fs/udf/udf_i.h 	struct kernel_lb_addr block;
block              97 fs/udf/udfdecl.h 	uint32_t block;
block             151 fs/udf/udfdecl.h 						  udf_pblk_t *block, int *err);
block             152 fs/udf/udfdecl.h extern struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
block             157 fs/udf/udfdecl.h extern udf_pblk_t udf_block_map(struct inode *inode, sector_t block);
block             160 fs/udf/udfdecl.h extern int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
block             177 fs/udf/udfdecl.h 					udf_pblk_t block);
block             178 fs/udf/udfdecl.h extern struct buffer_head *udf_tread(struct super_block *sb, udf_pblk_t block);
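The fs/udf/balloc.c entries above repeat one piece of arithmetic: a block number is split into the bitmap block that describes it (block >> (blocksize_bits + 3), since each bitmap block covers blocksize * 8 blocks) and the bit position inside that bitmap block. A minimal standalone sketch of that split, assuming a 2048-byte block size purely for the example:

	/* sketch only: locate the bitmap block and bit for a given block */
	#include <stdio.h>

	int main(void)
	{
		unsigned int blocksize_bits = 11;	/* assumed: 2048-byte blocks */
		unsigned int blocksize = 1u << blocksize_bits;
		unsigned long block = 100000;		/* arbitrary example block */

		/* each bitmap block covers blocksize * 8 blocks */
		unsigned long block_group = block >> (blocksize_bits + 3);
		unsigned long bit = block % (blocksize << 3);

		printf("block %lu -> bitmap block %lu, bit %lu\n",
		       block, block_group, bit);
		return 0;
	}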
block             228 fs/ufs/inode.c 	unsigned block = ufs_fragstoblks(lastfrag);
block             238 fs/ufs/inode.c 	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
block             523 fs/ufs/inode.c static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
block             525 fs/ufs/inode.c 	return generic_block_bmap(mapping,block,ufs_getfrag_block);
block             989 fs/ufs/inode.c 			u64 block = ufs_data_ptr_to_cpu(sb, p);
block             990 fs/ufs/inode.c 			if (block)
block             991 fs/ufs/inode.c 				free_full_branch(inode, block, depth);
block             998 fs/ufs/inode.c 			u64 block = ufs_data_ptr_to_cpu(sb, p);
block             999 fs/ufs/inode.c 			if (block)
block            1000 fs/ufs/inode.c 				free_data(&ctx, block, uspi->s_fpb);
block            1018 fs/ufs/inode.c 			u64 block = ufs_data_ptr_to_cpu(sb, p);
block            1019 fs/ufs/inode.c 			if (block) {
block            1024 fs/ufs/inode.c 				free_full_branch(inode, block, depth);
block            1032 fs/ufs/inode.c 			u64 block = ufs_data_ptr_to_cpu(sb, p);
block            1033 fs/ufs/inode.c 			if (block) {
block            1038 fs/ufs/inode.c 				free_data(&ctx, block, uspi->s_fpb);
block            1128 fs/ufs/inode.c 	u64 block;
block            1151 fs/ufs/inode.c 			block = ufs_data_ptr_to_cpu(sb, p);
block            1152 fs/ufs/inode.c 			if (!block)
block            1154 fs/ufs/inode.c 			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
block            1168 fs/ufs/inode.c 		block = ufs_data_ptr_to_cpu(sb, p);
block            1169 fs/ufs/inode.c 		if (block) {
block            1173 fs/ufs/inode.c 			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
block             466 fs/ufs/util.h  #define ubh_isblockclear(ubh,begin,block) (!_ubh_isblockset_(uspi,ubh,begin,block))
block             468 fs/ufs/util.h  #define ubh_isblockset(ubh,begin,block) _ubh_isblockset_(uspi,ubh,begin,block)
block             470 fs/ufs/util.h  	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
block             475 fs/ufs/util.h  	    	return (*ubh_get_addr (ubh, begin + block) == 0xff);
block             477 fs/ufs/util.h  		mask = 0x0f << ((block & 0x01) << 2);
block             478 fs/ufs/util.h  		return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
block             480 fs/ufs/util.h  		mask = 0x03 << ((block & 0x03) << 1);
block             481 fs/ufs/util.h  		return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
block             483 fs/ufs/util.h  		mask = 0x01 << (block & 0x07);
block             484 fs/ufs/util.h  		return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
block             489 fs/ufs/util.h  #define ubh_clrblock(ubh,begin,block) _ubh_clrblock_(uspi,ubh,begin,block)
block             491 fs/ufs/util.h  	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
block             495 fs/ufs/util.h  	    	*ubh_get_addr (ubh, begin + block) = 0x00;
block             498 fs/ufs/util.h  		*ubh_get_addr (ubh, begin + (block >> 1)) &= ~(0x0f << ((block & 0x01) << 2));
block             501 fs/ufs/util.h  		*ubh_get_addr (ubh, begin + (block >> 2)) &= ~(0x03 << ((block & 0x03) << 1));
block             504 fs/ufs/util.h  		*ubh_get_addr (ubh, begin + (block >> 3)) &= ~(0x01 << ((block & 0x07)));
block             509 fs/ufs/util.h  #define ubh_setblock(ubh,begin,block) _ubh_setblock_(uspi,ubh,begin,block)
block             511 fs/ufs/util.h  	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
block             515 fs/ufs/util.h  	    	*ubh_get_addr(ubh, begin + block) = 0xff;
block             518 fs/ufs/util.h  		*ubh_get_addr(ubh, begin + (block >> 1)) |= (0x0f << ((block & 0x01) << 2));
block             521 fs/ufs/util.h  		*ubh_get_addr(ubh, begin + (block >> 2)) |= (0x03 << ((block & 0x03) << 1));
block             524 fs/ufs/util.h  		*ubh_get_addr(ubh, begin + (block >> 3)) |= (0x01 << ((block & 0x07)));
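The fs/ufs/util.h entries above test and update a whole block's worth of fragment bits at once: depending on how many fragments make up a block, the block maps to a full byte, a nibble, a bit pair, or a single bit of the bitmap. A minimal standalone sketch of the "is the whole block set" test, parameterized by log2 of the fragments-per-block value (an adaptation of the switch in the indexed code, not a copy of it):

	/* sketch only: test whether all fragment bits of a block are set */
	#include <stdio.h>

	static int block_is_set(const unsigned char *bitmap,
				unsigned begin, unsigned block, unsigned fragshift)
	{
		unsigned char mask;

		switch (fragshift) {
		case 3:	/* 8 fragments per block: one whole byte */
			return bitmap[begin + block] == 0xff;
		case 2:	/* 4 fragments per block: one nibble */
			mask = 0x0f << ((block & 0x01) << 2);
			return (bitmap[begin + (block >> 1)] & mask) == mask;
		case 1:	/* 2 fragments per block: a bit pair */
			mask = 0x03 << ((block & 0x03) << 1);
			return (bitmap[begin + (block >> 2)] & mask) == mask;
		default:/* 1 fragment per block: a single bit */
			mask = 0x01 << (block & 0x07);
			return (bitmap[begin + (block >> 3)] & mask) == mask;
		}
	}

	int main(void)
	{
		unsigned char bitmap[2] = { 0xf0, 0x00 };

		/* with 4 fragments per block, block 1 is the high nibble of byte 0 */
		printf("block 1 set: %d\n", block_is_set(bitmap, 0, 1, 2));
		return 0;
	}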
block              73 fs/xfs/libxfs/xfs_ag.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             100 fs/xfs/libxfs/xfs_ag.c 			be16_add_cpu(&block->bb_numrecs, 1);
block             117 fs/xfs/libxfs/xfs_ag.c 		block->bb_numrecs = 0;
block             152 fs/xfs/libxfs/xfs_ag.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             166 fs/xfs/libxfs/xfs_ag.c 	rrec = XFS_RMAP_REC_ADDR(block, 1);
block             173 fs/xfs/libxfs/xfs_ag.c 	rrec = XFS_RMAP_REC_ADDR(block, 2);
block             180 fs/xfs/libxfs/xfs_ag.c 	rrec = XFS_RMAP_REC_ADDR(block, 3);
block             188 fs/xfs/libxfs/xfs_ag.c 	rrec = XFS_RMAP_REC_ADDR(block, 4);
block             196 fs/xfs/libxfs/xfs_ag.c 		rrec = XFS_RMAP_REC_ADDR(block, 5);
block             201 fs/xfs/libxfs/xfs_ag.c 		be16_add_cpu(&block->bb_numrecs, 1);
block             206 fs/xfs/libxfs/xfs_ag.c 		rrec = XFS_RMAP_REC_ADDR(block,
block             207 fs/xfs/libxfs/xfs_ag.c 				be16_to_cpu(block->bb_numrecs) + 1);
block             213 fs/xfs/libxfs/xfs_ag.c 		be16_add_cpu(&block->bb_numrecs, 1);
block             111 fs/xfs/libxfs/xfs_alloc_btree.c 	struct xfs_btree_block	*block,
block             130 fs/xfs/libxfs/xfs_alloc_btree.c 		if (ptr != xfs_btree_get_numrecs(block))
block             141 fs/xfs/libxfs/xfs_alloc_btree.c 		numrecs = xfs_btree_get_numrecs(block);
block             149 fs/xfs/libxfs/xfs_alloc_btree.c 			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
block             295 fs/xfs/libxfs/xfs_alloc_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             301 fs/xfs/libxfs/xfs_alloc_btree.c 	if (!xfs_verify_magic(bp, block->bb_magic))
block             319 fs/xfs/libxfs/xfs_alloc_btree.c 	level = be16_to_cpu(block->bb_level);
block              29 fs/xfs/libxfs/xfs_alloc_btree.h #define XFS_ALLOC_REC_ADDR(mp, block, index) \
block              31 fs/xfs/libxfs/xfs_alloc_btree.h 		((char *)(block) + \
block              35 fs/xfs/libxfs/xfs_alloc_btree.h #define XFS_ALLOC_KEY_ADDR(mp, block, index) \
block              37 fs/xfs/libxfs/xfs_alloc_btree.h 		((char *)(block) + \
block              41 fs/xfs/libxfs/xfs_alloc_btree.h #define XFS_ALLOC_PTR_ADDR(mp, block, index, maxrecs) \
block              43 fs/xfs/libxfs/xfs_alloc_btree.h 		((char *)(block) + \
block             259 fs/xfs/libxfs/xfs_bmap.c 	struct xfs_btree_block	*block,
block             268 fs/xfs/libxfs/xfs_bmap.c 	ASSERT(be16_to_cpu(block->bb_level) > 0);
block             271 fs/xfs/libxfs/xfs_bmap.c 	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
block             273 fs/xfs/libxfs/xfs_bmap.c 		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
block             285 fs/xfs/libxfs/xfs_bmap.c 			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
block             287 fs/xfs/libxfs/xfs_bmap.c 			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
block             289 fs/xfs/libxfs/xfs_bmap.c 		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
block             291 fs/xfs/libxfs/xfs_bmap.c 				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
block             293 fs/xfs/libxfs/xfs_bmap.c 				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
block             319 fs/xfs/libxfs/xfs_bmap.c 	struct xfs_btree_block	*block;	/* current btree block */
block             344 fs/xfs/libxfs/xfs_bmap.c 	block = ifp->if_broot;
block             348 fs/xfs/libxfs/xfs_bmap.c 	level = be16_to_cpu(block->bb_level);
block             350 fs/xfs/libxfs/xfs_bmap.c 	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
block             351 fs/xfs/libxfs/xfs_bmap.c 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
block             374 fs/xfs/libxfs/xfs_bmap.c 		block = XFS_BUF_TO_BLOCK(bp);
block             383 fs/xfs/libxfs/xfs_bmap.c 		xfs_check_block(block, mp, 0, 0);
block             384 fs/xfs/libxfs/xfs_bmap.c 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
block             407 fs/xfs/libxfs/xfs_bmap.c 		num_recs = xfs_btree_get_numrecs(block);
block             413 fs/xfs/libxfs/xfs_bmap.c 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
block             421 fs/xfs/libxfs/xfs_bmap.c 		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
block             428 fs/xfs/libxfs/xfs_bmap.c 			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
block             458 fs/xfs/libxfs/xfs_bmap.c 		block = XFS_BUF_TO_BLOCK(bp);
block             656 fs/xfs/libxfs/xfs_bmap.c 	struct xfs_btree_block	*block;		/* btree root block */
block             682 fs/xfs/libxfs/xfs_bmap.c 	block = ifp->if_broot;
block             683 fs/xfs/libxfs/xfs_bmap.c 	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
block             758 fs/xfs/libxfs/xfs_bmap.c 	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
block             761 fs/xfs/libxfs/xfs_bmap.c 	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
block             762 fs/xfs/libxfs/xfs_bmap.c 						be16_to_cpu(block->bb_level)));
block            1170 fs/xfs/libxfs/xfs_bmap.c 	struct xfs_btree_block	*block = ifp->if_broot;
block            1190 fs/xfs/libxfs/xfs_bmap.c 	level = be16_to_cpu(block->bb_level);
block            1195 fs/xfs/libxfs/xfs_bmap.c 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
block            1207 fs/xfs/libxfs/xfs_bmap.c 		block = XFS_BUF_TO_BLOCK(bp);
block            1210 fs/xfs/libxfs/xfs_bmap.c 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
block            1231 fs/xfs/libxfs/xfs_bmap.c 		num_recs = xfs_btree_get_numrecs(block);
block            1237 fs/xfs/libxfs/xfs_bmap.c 					__func__, block, sizeof(*block),
block            1245 fs/xfs/libxfs/xfs_bmap.c 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
block            1252 fs/xfs/libxfs/xfs_bmap.c 		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
block            1280 fs/xfs/libxfs/xfs_bmap.c 		block = XFS_BUF_TO_BLOCK(bp);
block             424 fs/xfs/libxfs/xfs_bmap_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             428 fs/xfs/libxfs/xfs_bmap_btree.c 	if (!xfs_verify_magic(bp, block->bb_magic))
block             448 fs/xfs/libxfs/xfs_bmap_btree.c 	level = be16_to_cpu(block->bb_level);
block              22 fs/xfs/libxfs/xfs_bmap_btree.h #define XFS_BMBT_REC_ADDR(mp, block, index) \
block              24 fs/xfs/libxfs/xfs_bmap_btree.h 		((char *)(block) + \
block              28 fs/xfs/libxfs/xfs_bmap_btree.h #define XFS_BMBT_KEY_ADDR(mp, block, index) \
block              30 fs/xfs/libxfs/xfs_bmap_btree.h 		((char *)(block) + \
block              34 fs/xfs/libxfs/xfs_bmap_btree.h #define XFS_BMBT_PTR_ADDR(mp, block, index, maxrecs) \
block              36 fs/xfs/libxfs/xfs_bmap_btree.h 		((char *)(block) + \
block              41 fs/xfs/libxfs/xfs_bmap_btree.h #define XFS_BMDR_REC_ADDR(block, index) \
block              43 fs/xfs/libxfs/xfs_bmap_btree.h 		((char *)(block) + \
block              47 fs/xfs/libxfs/xfs_bmap_btree.h #define XFS_BMDR_KEY_ADDR(block, index) \
block              49 fs/xfs/libxfs/xfs_bmap_btree.h 		((char *)(block) + \
block              53 fs/xfs/libxfs/xfs_bmap_btree.h #define XFS_BMDR_PTR_ADDR(block, index, maxrecs) \
block              55 fs/xfs/libxfs/xfs_bmap_btree.h 		((char *)(block) + \
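The XFS_*_REC_ADDR / KEY_ADDR / PTR_ADDR macros indexed above (alloc, bmbt, bmdr, and the inobt/refcount/rmap variants further down) all follow one addressing scheme: entries are packed right after the block header and are referenced with 1-based indices, so entry i lives at header_len + (i - 1) * entry_size. A minimal standalone sketch of that scheme; the header length and record layout below are placeholders, not the real XFS structures:

	/* sketch only: 1-based record addressing inside a btree block */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define SKETCH_BLOCK_LEN  16	/* assumed header size */

	struct sketch_rec { uint32_t startoff; uint32_t blockcount; };

	static struct sketch_rec *sketch_rec_addr(void *block, unsigned int index)
	{
		/* records packed right after the header, index counts from 1 */
		return (struct sketch_rec *)((char *)block + SKETCH_BLOCK_LEN +
					     (index - 1) * sizeof(struct sketch_rec));
	}

	int main(void)
	{
		uint64_t blockbuf[32];			/* stand-in for an on-disk block */
		struct sketch_rec rec = { 100, 8 };

		memset(blockbuf, 0, sizeof(blockbuf));
		*sketch_rec_addr(blockbuf, 1) = rec;	/* first record */

		printf("rec 1: startoff %u, blockcount %u\n",
		       (unsigned)sketch_rec_addr(blockbuf, 1)->startoff,
		       (unsigned)sketch_rec_addr(blockbuf, 1)->blockcount);
		return 0;
	}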
block              59 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block              68 fs/xfs/libxfs/xfs_btree.c 		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
block              70 fs/xfs/libxfs/xfs_btree.c 		if (block->bb_u.l.bb_blkno !=
block              73 fs/xfs/libxfs/xfs_btree.c 		if (block->bb_u.l.bb_pad != cpu_to_be32(0))
block              77 fs/xfs/libxfs/xfs_btree.c 	if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
block              79 fs/xfs/libxfs/xfs_btree.c 	if (be16_to_cpu(block->bb_level) != level)
block              81 fs/xfs/libxfs/xfs_btree.c 	if (be16_to_cpu(block->bb_numrecs) >
block              84 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
block              85 fs/xfs/libxfs/xfs_btree.c 	    !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_leftsib),
block              88 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
block              89 fs/xfs/libxfs/xfs_btree.c 	    !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_rightsib),
block             100 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block             107 fs/xfs/libxfs/xfs_btree.c 	fa = __xfs_btree_check_lblock(cur, block, level, bp);
block             125 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block             134 fs/xfs/libxfs/xfs_btree.c 		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
block             136 fs/xfs/libxfs/xfs_btree.c 		if (block->bb_u.s.bb_blkno !=
block             141 fs/xfs/libxfs/xfs_btree.c 	if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
block             143 fs/xfs/libxfs/xfs_btree.c 	if (be16_to_cpu(block->bb_level) != level)
block             145 fs/xfs/libxfs/xfs_btree.c 	if (be16_to_cpu(block->bb_numrecs) >
block             148 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
block             149 fs/xfs/libxfs/xfs_btree.c 	    !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_leftsib),
block             152 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
block             153 fs/xfs/libxfs/xfs_btree.c 	    !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_rightsib),
block             164 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block             171 fs/xfs/libxfs/xfs_btree.c 	fa = __xfs_btree_check_sblock(cur, block, level, bp);
block             188 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,	/* generic btree block pointer */
block             193 fs/xfs/libxfs/xfs_btree.c 		return xfs_btree_check_lblock(cur, block, level, bp);
block             195 fs/xfs/libxfs/xfs_btree.c 		return xfs_btree_check_sblock(cur, block, level, bp);
block             273 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             279 fs/xfs/libxfs/xfs_btree.c 		block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
block             287 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             291 fs/xfs/libxfs/xfs_btree.c 		if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn)))
block             311 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             317 fs/xfs/libxfs/xfs_btree.c 		block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
block             325 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
block             329 fs/xfs/libxfs/xfs_btree.c 		if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
block             598 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block)
block             601 fs/xfs/libxfs/xfs_btree.c 		((char *)block + xfs_btree_rec_offset(cur, n));
block             611 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block)
block             614 fs/xfs/libxfs/xfs_btree.c 		((char *)block + xfs_btree_key_offset(cur, n));
block             624 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block)
block             627 fs/xfs/libxfs/xfs_btree.c 		((char *)block + xfs_btree_high_key_offset(cur, n));
block             637 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block)
block             639 fs/xfs/libxfs/xfs_btree.c 	int			level = xfs_btree_get_level(block);
block             641 fs/xfs/libxfs/xfs_btree.c 	ASSERT(block->bb_level != 0);
block             644 fs/xfs/libxfs/xfs_btree.c 		((char *)block + xfs_btree_ptr_offset(cur, n, level));
block             727 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;	/* generic btree block pointer */
block             730 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block             731 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_check_block(cur, block, level, bp);
block             733 fs/xfs/libxfs/xfs_btree.c 		return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
block             735 fs/xfs/libxfs/xfs_btree.c 		return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
block             747 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;	/* generic btree block pointer */
block             753 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block             754 fs/xfs/libxfs/xfs_btree.c 	if (xfs_btree_check_block(cur, block, level, bp))
block             759 fs/xfs/libxfs/xfs_btree.c 	if (!block->bb_numrecs)
block             777 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;	/* generic btree block pointer */
block             783 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block             784 fs/xfs/libxfs/xfs_btree.c 	if (xfs_btree_check_block(cur, block, level, bp))
block             789 fs/xfs/libxfs/xfs_btree.c 	if (!block->bb_numrecs)
block             794 fs/xfs/libxfs/xfs_btree.c 	cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
block             908 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block)
block             911 fs/xfs/libxfs/xfs_btree.c 	xfs_fsblock_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
block             912 fs/xfs/libxfs/xfs_btree.c 	xfs_fsblock_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);
block             933 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block *block)
block             936 fs/xfs/libxfs/xfs_btree.c 	xfs_agblock_t		left = be32_to_cpu(block->bb_u.s.bb_leftsib);
block             937 fs/xfs/libxfs/xfs_btree.c 	xfs_agblock_t		right = be32_to_cpu(block->bb_u.s.bb_rightsib);
block             965 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;
block             979 fs/xfs/libxfs/xfs_btree.c 	block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);
block             982 fs/xfs/libxfs/xfs_btree.c 		return xfs_btree_readahead_lblock(cur, lr, block);
block             983 fs/xfs/libxfs/xfs_btree.c 	return xfs_btree_readahead_sblock(cur, lr, block);
block            1091 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block            1099 fs/xfs/libxfs/xfs_btree.c 			ptr->l = block->bb_u.l.bb_rightsib;
block            1101 fs/xfs/libxfs/xfs_btree.c 			ptr->l = block->bb_u.l.bb_leftsib;
block            1104 fs/xfs/libxfs/xfs_btree.c 			ptr->s = block->bb_u.s.bb_rightsib;
block            1106 fs/xfs/libxfs/xfs_btree.c 			ptr->s = block->bb_u.s.bb_leftsib;
block            1113 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block            1121 fs/xfs/libxfs/xfs_btree.c 			block->bb_u.l.bb_rightsib = ptr->l;
block            1123 fs/xfs/libxfs/xfs_btree.c 			block->bb_u.l.bb_leftsib = ptr->l;
block            1126 fs/xfs/libxfs/xfs_btree.c 			block->bb_u.s.bb_rightsib = ptr->s;
block            1128 fs/xfs/libxfs/xfs_btree.c 			block->bb_u.s.bb_leftsib = ptr->s;
block            1221 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block            1231 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
block            1284 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	**block,
block            1301 fs/xfs/libxfs/xfs_btree.c 	*block = XFS_BUF_TO_BLOCK(*bpp);
block            1314 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	**block,
block            1334 fs/xfs/libxfs/xfs_btree.c 	*block = XFS_BUF_TO_BLOCK(*bpp);
block            1489 fs/xfs/libxfs/xfs_btree.c 		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block            1490 fs/xfs/libxfs/xfs_btree.c 		int			level = xfs_btree_get_level(block);
block            1581 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;
block            1593 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block            1596 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_check_block(cur, block, level, bp);
block            1602 fs/xfs/libxfs/xfs_btree.c 	if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
block            1606 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
block            1617 fs/xfs/libxfs/xfs_btree.c 		block = xfs_btree_get_block(cur, lev, &bp);
block            1620 fs/xfs/libxfs/xfs_btree.c 		error = xfs_btree_check_block(cur, block, lev, bp);
block            1625 fs/xfs/libxfs/xfs_btree.c 		if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
block            1649 fs/xfs/libxfs/xfs_btree.c 	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
block            1652 fs/xfs/libxfs/xfs_btree.c 		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
block            1654 fs/xfs/libxfs/xfs_btree.c 		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
block            1683 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;
block            1699 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block            1702 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_check_block(cur, block, level, bp);
block            1708 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
block            1742 fs/xfs/libxfs/xfs_btree.c 	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
block            1745 fs/xfs/libxfs/xfs_btree.c 		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
block            1747 fs/xfs/libxfs/xfs_btree.c 		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
block            1751 fs/xfs/libxfs/xfs_btree.c 		cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
block            1837 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block            1842 fs/xfs/libxfs/xfs_btree.c 				xfs_btree_rec_addr(cur, keyno, block));
block            1846 fs/xfs/libxfs/xfs_btree.c 	return xfs_btree_key_addr(cur, keyno, block);
block            1859 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;	/* current btree block */
block            1873 fs/xfs/libxfs/xfs_btree.c 	block = NULL;
block            1888 fs/xfs/libxfs/xfs_btree.c 		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
block            1906 fs/xfs/libxfs/xfs_btree.c 			high = xfs_btree_get_numrecs(block);
block            1912 fs/xfs/libxfs/xfs_btree.c 							cur->bc_mp, block,
block            1913 fs/xfs/libxfs/xfs_btree.c 							sizeof(*block));
block            1934 fs/xfs/libxfs/xfs_btree.c 						keyno, block, &key);
block            1963 fs/xfs/libxfs/xfs_btree.c 			pp = xfs_btree_ptr_addr(cur, keyno, block);
block            1980 fs/xfs/libxfs/xfs_btree.c 		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
block            1982 fs/xfs/libxfs/xfs_btree.c 		    keyno > xfs_btree_get_numrecs(block) &&
block            1999 fs/xfs/libxfs/xfs_btree.c 	if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
block            2026 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block            2035 fs/xfs/libxfs/xfs_btree.c 	rec = xfs_btree_rec_addr(cur, 1, block);
block            2041 fs/xfs/libxfs/xfs_btree.c 		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
block            2042 fs/xfs/libxfs/xfs_btree.c 			rec = xfs_btree_rec_addr(cur, n, block);
block            2058 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block            2067 fs/xfs/libxfs/xfs_btree.c 		memcpy(key, xfs_btree_key_addr(cur, 1, block),
block            2070 fs/xfs/libxfs/xfs_btree.c 		max_hkey = xfs_btree_high_key_addr(cur, 1, block);
block            2071 fs/xfs/libxfs/xfs_btree.c 		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
block            2072 fs/xfs/libxfs/xfs_btree.c 			hkey = xfs_btree_high_key_addr(cur, n, block);
block            2080 fs/xfs/libxfs/xfs_btree.c 		memcpy(key, xfs_btree_key_addr(cur, 1, block),
block            2089 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block            2092 fs/xfs/libxfs/xfs_btree.c 	if (be16_to_cpu(block->bb_level) == 0)
block            2093 fs/xfs/libxfs/xfs_btree.c 		xfs_btree_get_leaf_keys(cur, block, key);
block            2095 fs/xfs/libxfs/xfs_btree.c 		xfs_btree_get_node_keys(cur, block, key);
block            2122 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block,
block            2144 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_keys(cur, block, lkey);
block            2149 fs/xfs/libxfs/xfs_btree.c 		block = xfs_btree_get_block(cur, level, &bp);
block            2152 fs/xfs/libxfs/xfs_btree.c 		error = xfs_btree_check_block(cur, block, level, bp);
block            2157 fs/xfs/libxfs/xfs_btree.c 		nlkey = xfs_btree_key_addr(cur, ptr, block);
block            2158 fs/xfs/libxfs/xfs_btree.c 		nhkey = xfs_btree_high_key_addr(cur, ptr, block);
block            2167 fs/xfs/libxfs/xfs_btree.c 		xfs_btree_get_node_keys(cur, block, lkey);
block            2180 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;
block            2182 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block            2183 fs/xfs/libxfs/xfs_btree.c 	return __xfs_btree_updkeys(cur, level, block, bp, true);
block            2194 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;
block            2202 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block            2204 fs/xfs/libxfs/xfs_btree.c 		return __xfs_btree_updkeys(cur, level, block, bp, false);
block            2212 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_keys(cur, block, &key);
block            2217 fs/xfs/libxfs/xfs_btree.c 		block = xfs_btree_get_block(cur, level, &bp);
block            2219 fs/xfs/libxfs/xfs_btree.c 		error = xfs_btree_check_block(cur, block, level, bp);
block            2224 fs/xfs/libxfs/xfs_btree.c 		kp = xfs_btree_key_addr(cur, ptr, block);
block            2242 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;
block            2249 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, 0, &bp);
block            2252 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_check_block(cur, block, 0, bp);
block            2258 fs/xfs/libxfs/xfs_btree.c 	rp = xfs_btree_rec_addr(cur, ptr, block);
block            2268 fs/xfs/libxfs/xfs_btree.c 	if (xfs_btree_is_lastrec(cur, block, 0)) {
block            2269 fs/xfs/libxfs/xfs_btree.c 		cur->bc_ops->update_lastrec(cur, block, rec,
block            2923 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;		/* btree block */
block            2940 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_iroot(cur);
block            2941 fs/xfs/libxfs/xfs_btree.c 	pp = xfs_btree_ptr_addr(cur, 1, block);
block            2961 fs/xfs/libxfs/xfs_btree.c 	memcpy(cblock, block, xfs_btree_block_len(cur));
block            2969 fs/xfs/libxfs/xfs_btree.c 	be16_add_cpu(&block->bb_level, 1);
block            2970 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_set_numrecs(block, 1);
block            2974 fs/xfs/libxfs/xfs_btree.c 	kp = xfs_btree_key_addr(cur, 1, block);
block            3023 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;	/* one half of the old root block */
block            3063 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
block            3066 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
block            3071 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
block            3076 fs/xfs/libxfs/xfs_btree.c 		left = block;
block            3086 fs/xfs/libxfs/xfs_btree.c 		right = block;
block            3224 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;	/* btree block */
block            3264 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block            3266 fs/xfs/libxfs/xfs_btree.c 	numrecs = xfs_btree_get_numrecs(block);
block            3269 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_check_block(cur, block, level, bp);
block            3277 fs/xfs/libxfs/xfs_btree.c 				xfs_btree_rec_addr(cur, ptr, block)));
block            3280 fs/xfs/libxfs/xfs_btree.c 				xfs_btree_key_addr(cur, ptr, block)));
block            3301 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block            3302 fs/xfs/libxfs/xfs_btree.c 	numrecs = xfs_btree_get_numrecs(block);
block            3305 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_check_block(cur, block, level, bp);
block            3321 fs/xfs/libxfs/xfs_btree.c 		kp = xfs_btree_key_addr(cur, ptr, block);
block            3322 fs/xfs/libxfs/xfs_btree.c 		pp = xfs_btree_ptr_addr(cur, ptr, block);
block            3341 fs/xfs/libxfs/xfs_btree.c 		xfs_btree_set_numrecs(block, numrecs);
block            3347 fs/xfs/libxfs/xfs_btree.c 				xfs_btree_key_addr(cur, ptr + 1, block)));
block            3354 fs/xfs/libxfs/xfs_btree.c 		rp = xfs_btree_rec_addr(cur, ptr, block);
block            3360 fs/xfs/libxfs/xfs_btree.c 		xfs_btree_set_numrecs(block, ++numrecs);
block            3365 fs/xfs/libxfs/xfs_btree.c 				xfs_btree_rec_addr(cur, ptr + 1, block)));
block            3382 fs/xfs/libxfs/xfs_btree.c 		xfs_btree_get_keys(cur, block, lkey);
block            3393 fs/xfs/libxfs/xfs_btree.c 	if (xfs_btree_is_lastrec(cur, block, level)) {
block            3394 fs/xfs/libxfs/xfs_btree.c 		cur->bc_ops->update_lastrec(cur, block, rec,
block            3510 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;
block            3540 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_iroot(cur);
block            3541 fs/xfs/libxfs/xfs_btree.c 	if (xfs_btree_get_numrecs(block) != 1)
block            3558 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
block            3560 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
block            3568 fs/xfs/libxfs/xfs_btree.c 		block = ifp->if_broot;
block            3571 fs/xfs/libxfs/xfs_btree.c 	be16_add_cpu(&block->bb_numrecs, index);
block            3572 fs/xfs/libxfs/xfs_btree.c 	ASSERT(block->bb_numrecs == cblock->bb_numrecs);
block            3574 fs/xfs/libxfs/xfs_btree.c 	kp = xfs_btree_key_addr(cur, 1, block);
block            3578 fs/xfs/libxfs/xfs_btree.c 	pp = xfs_btree_ptr_addr(cur, 1, block);
block            3594 fs/xfs/libxfs/xfs_btree.c 	be16_add_cpu(&block->bb_level, -1);
block            3664 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;		/* btree block */
block            3693 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block            3694 fs/xfs/libxfs/xfs_btree.c 	numrecs = xfs_btree_get_numrecs(block);
block            3697 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_check_block(cur, block, level, bp);
block            3717 fs/xfs/libxfs/xfs_btree.c 		lkp = xfs_btree_key_addr(cur, ptr + 1, block);
block            3718 fs/xfs/libxfs/xfs_btree.c 		lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
block            3736 fs/xfs/libxfs/xfs_btree.c 				xfs_btree_rec_addr(cur, ptr + 1, block),
block            3745 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_set_numrecs(block, --numrecs);
block            3752 fs/xfs/libxfs/xfs_btree.c 	if (xfs_btree_is_lastrec(cur, block, level)) {
block            3753 fs/xfs/libxfs/xfs_btree.c 		cur->bc_ops->update_lastrec(cur, block, NULL,
block            3789 fs/xfs/libxfs/xfs_btree.c 			pp = xfs_btree_ptr_addr(cur, 1, block);
block            3828 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
block            3829 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
block            3901 fs/xfs/libxfs/xfs_btree.c 				ASSERT(xfs_btree_get_numrecs(block) >=
block            3970 fs/xfs/libxfs/xfs_btree.c 				ASSERT(xfs_btree_get_numrecs(block) >=
block            3997 fs/xfs/libxfs/xfs_btree.c 	    lrecs + xfs_btree_get_numrecs(block) <=
block            4004 fs/xfs/libxfs/xfs_btree.c 		right = block;
block            4014 fs/xfs/libxfs/xfs_btree.c 		   rrecs + xfs_btree_get_numrecs(block) <=
block            4021 fs/xfs/libxfs/xfs_btree.c 		left = block;
block            4221 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;	/* btree block */
block            4229 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, 0, &bp);
block            4232 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_check_block(cur, block, 0, bp);
block            4240 fs/xfs/libxfs/xfs_btree.c 	if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
block            4248 fs/xfs/libxfs/xfs_btree.c 	*recp = xfs_btree_rec_addr(cur, ptr, block);
block            4261 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block		*block;
block            4268 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block            4276 fs/xfs/libxfs/xfs_btree.c 	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
block            4280 fs/xfs/libxfs/xfs_btree.c 	return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
block            4293 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block		*block = NULL;
block            4301 fs/xfs/libxfs/xfs_btree.c 		error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
block            4309 fs/xfs/libxfs/xfs_btree.c 			ptr = xfs_btree_ptr_addr(cur, 1, block);
block            4364 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;
block            4368 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block            4370 fs/xfs/libxfs/xfs_btree.c 		if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner))
block            4372 fs/xfs/libxfs/xfs_btree.c 		block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
block            4374 fs/xfs/libxfs/xfs_btree.c 		if (block->bb_u.s.bb_owner == cpu_to_be32(bbcoi->new_owner))
block            4376 fs/xfs/libxfs/xfs_btree.c 		block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
block            4426 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block            4430 fs/xfs/libxfs/xfs_btree.c 	if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
block            4432 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.l.bb_blkno != cpu_to_be64(bp->b_bn))
block            4435 fs/xfs/libxfs/xfs_btree.c 	    be64_to_cpu(block->bb_u.l.bb_owner) != owner)
block            4447 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block            4450 fs/xfs/libxfs/xfs_btree.c 	if (be16_to_cpu(block->bb_numrecs) > max_recs)
block            4454 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
block            4455 fs/xfs/libxfs/xfs_btree.c 	    !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))
block            4457 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
block            4458 fs/xfs/libxfs/xfs_btree.c 	    !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))
block            4475 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block            4480 fs/xfs/libxfs/xfs_btree.c 	if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
block            4482 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
block            4484 fs/xfs/libxfs/xfs_btree.c 	if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
block            4501 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block            4505 fs/xfs/libxfs/xfs_btree.c 	if (be16_to_cpu(block->bb_numrecs) > max_recs)
block            4510 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
block            4511 fs/xfs/libxfs/xfs_btree.c 	    !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_leftsib)))
block            4513 fs/xfs/libxfs/xfs_btree.c 	if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
block            4514 fs/xfs/libxfs/xfs_btree.c 	    !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib)))
block            4649 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block		*block;
block            4660 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
block            4666 fs/xfs/libxfs/xfs_btree.c 	error = xfs_btree_check_block(cur, block, level, bp);
block            4673 fs/xfs/libxfs/xfs_btree.c 		block = xfs_btree_get_block(cur, level, &bp);
block            4676 fs/xfs/libxfs/xfs_btree.c 		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
block            4686 fs/xfs/libxfs/xfs_btree.c 			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
block            4714 fs/xfs/libxfs/xfs_btree.c 		lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
block            4715 fs/xfs/libxfs/xfs_btree.c 		hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
block            4716 fs/xfs/libxfs/xfs_btree.c 		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
block            4729 fs/xfs/libxfs/xfs_btree.c 					&block);
block            4735 fs/xfs/libxfs/xfs_btree.c 			error = xfs_btree_check_block(cur, block, level, bp);
block            4918 fs/xfs/libxfs/xfs_btree.c 	struct xfs_btree_block	*block;
block            4921 fs/xfs/libxfs/xfs_btree.c 	block = xfs_btree_get_block(cur, 0, &bp);
block            4924 fs/xfs/libxfs/xfs_btree.c 	if (cur->bc_ptrs[0] < xfs_btree_get_numrecs(block))
block            4929 fs/xfs/libxfs/xfs_btree.c 		return block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK);
block            4931 fs/xfs/libxfs/xfs_btree.c 		return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK);
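Several fs/xfs/libxfs/xfs_btree.c entries above compare a block's right-sibling field against a "null block" sentinel to decide whether it is the last block at its level (and the increment/decrement walks stop there). A minimal standalone sketch of that check; the sentinel value and the trimmed header below are placeholders, not the real xfs_btree_block layout:

	/* sketch only: detect the rightmost block at a btree level */
	#include <stdint.h>
	#include <stdio.h>

	#define SKETCH_NULLBLOCK  ((uint64_t)-1)	/* assumed sentinel */

	struct sketch_lblock {
		uint64_t bb_leftsib;
		uint64_t bb_rightsib;
	};

	static int sketch_is_lastblock(const struct sketch_lblock *block)
	{
		return block->bb_rightsib == SKETCH_NULLBLOCK;
	}

	int main(void)
	{
		struct sketch_lblock mid  = { 10, 12 };
		struct sketch_lblock last = { 12, SKETCH_NULLBLOCK };

		printf("mid last? %d, last last? %d\n",
		       sketch_is_lastblock(&mid), sketch_is_lastblock(&last));
		return 0;
	}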
block             118 fs/xfs/libxfs/xfs_btree.h 				  struct xfs_btree_block *block,
block             247 fs/xfs/libxfs/xfs_btree.h 		struct xfs_btree_block *block, int level, struct xfs_buf *bp);
block             249 fs/xfs/libxfs/xfs_btree.h 		struct xfs_btree_block *block, int level, struct xfs_buf *bp);
block             257 fs/xfs/libxfs/xfs_btree.h 	struct xfs_btree_block	*block,	/* generic btree block pointer */
block             427 fs/xfs/libxfs/xfs_btree.h static inline int xfs_btree_get_numrecs(struct xfs_btree_block *block)
block             429 fs/xfs/libxfs/xfs_btree.h 	return be16_to_cpu(block->bb_numrecs);
block             432 fs/xfs/libxfs/xfs_btree.h static inline void xfs_btree_set_numrecs(struct xfs_btree_block *block,
block             435 fs/xfs/libxfs/xfs_btree.h 	block->bb_numrecs = cpu_to_be16(numrecs);
block             438 fs/xfs/libxfs/xfs_btree.h static inline int xfs_btree_get_level(struct xfs_btree_block *block)
block             440 fs/xfs/libxfs/xfs_btree.h 	return be16_to_cpu(block->bb_level);
block             491 fs/xfs/libxfs/xfs_btree.h 		struct xfs_btree_block *block);
block             493 fs/xfs/libxfs/xfs_btree.h 		struct xfs_btree_block *block);
block             495 fs/xfs/libxfs/xfs_btree.h 		struct xfs_btree_block *block);
block             497 fs/xfs/libxfs/xfs_btree.h 		struct xfs_btree_block *block);
block             507 fs/xfs/libxfs/xfs_btree.h 			   struct xfs_btree_block *block,
block             510 fs/xfs/libxfs/xfs_btree.h 		struct xfs_btree_block *block, union xfs_btree_key *key);
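The fs/xfs/libxfs/xfs_btree.h accessors above show why nearly every btree-block field access in the index goes through be16_to_cpu()/be32_to_cpu(): counts and levels are stored big-endian in the on-disk header. A minimal standalone sketch of such an accessor, using a trimmed stand-in header and a byte swap that assumes a little-endian host:

	/* sketch only: read a big-endian record count out of a block header */
	#include <stdint.h>
	#include <stdio.h>

	struct sketch_btree_block {
		uint32_t bb_magic;	/* big-endian on disk */
		uint16_t bb_level;	/* big-endian on disk */
		uint16_t bb_numrecs;	/* big-endian on disk */
	};

	static uint16_t be16_to_cpu_sketch(uint16_t v)
	{
		return (uint16_t)((v >> 8) | (v << 8));	/* little-endian host assumed */
	}

	static int sketch_get_numrecs(const struct sketch_btree_block *block)
	{
		return be16_to_cpu_sketch(block->bb_numrecs);
	}

	int main(void)
	{
		/* 5 records: on-disk bytes 00 05, seen as 0x0500 on this host */
		struct sketch_btree_block b = { 0, 0, 0x0500 };

		printf("numrecs = %d\n", sketch_get_numrecs(&b));
		return 0;
	}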
block             112 fs/xfs/libxfs/xfs_dir2_priv.h 		struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp);
block             257 fs/xfs/libxfs/xfs_ialloc_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             261 fs/xfs/libxfs/xfs_ialloc_btree.c 	if (!xfs_verify_magic(bp, block->bb_magic))
block             281 fs/xfs/libxfs/xfs_ialloc_btree.c 	level = be16_to_cpu(block->bb_level);
block              29 fs/xfs/libxfs/xfs_ialloc_btree.h #define XFS_INOBT_REC_ADDR(mp, block, index) \
block              31 fs/xfs/libxfs/xfs_ialloc_btree.h 		((char *)(block) + \
block              35 fs/xfs/libxfs/xfs_ialloc_btree.h #define XFS_INOBT_KEY_ADDR(mp, block, index) \
block              37 fs/xfs/libxfs/xfs_ialloc_btree.h 		((char *)(block) + \
block              41 fs/xfs/libxfs/xfs_ialloc_btree.h #define XFS_INOBT_PTR_ADDR(mp, block, index, maxrecs) \
block              43 fs/xfs/libxfs/xfs_ialloc_btree.h 		((char *)(block) + \
block              60 fs/xfs/libxfs/xfs_log_format.h static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
block              62 fs/xfs/libxfs/xfs_log_format.h 	return ((xfs_lsn_t)cycle << 32) | block;
block             205 fs/xfs/libxfs/xfs_refcount_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             210 fs/xfs/libxfs/xfs_refcount_btree.c 	if (!xfs_verify_magic(bp, block->bb_magic))
block             219 fs/xfs/libxfs/xfs_refcount_btree.c 	level = be16_to_cpu(block->bb_level);
block              27 fs/xfs/libxfs/xfs_refcount_btree.h #define XFS_REFCOUNT_REC_ADDR(block, index) \
block              29 fs/xfs/libxfs/xfs_refcount_btree.h 		((char *)(block) + \
block              33 fs/xfs/libxfs/xfs_refcount_btree.h #define XFS_REFCOUNT_KEY_ADDR(block, index) \
block              35 fs/xfs/libxfs/xfs_refcount_btree.h 		((char *)(block) + \
block              39 fs/xfs/libxfs/xfs_refcount_btree.h #define XFS_REFCOUNT_PTR_ADDR(block, index, maxrecs) \
block              41 fs/xfs/libxfs/xfs_refcount_btree.h 		((char *)(block) + \
block             292 fs/xfs/libxfs/xfs_rmap_btree.c 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
block             309 fs/xfs/libxfs/xfs_rmap_btree.c 	if (!xfs_verify_magic(bp, block->bb_magic))
block             318 fs/xfs/libxfs/xfs_rmap_btree.c 	level = be16_to_cpu(block->bb_level);
block              21 fs/xfs/libxfs/xfs_rmap_btree.h #define XFS_RMAP_REC_ADDR(block, index) \
block              23 fs/xfs/libxfs/xfs_rmap_btree.h 		((char *)(block) + XFS_RMAP_BLOCK_LEN + \
block              26 fs/xfs/libxfs/xfs_rmap_btree.h #define XFS_RMAP_KEY_ADDR(block, index) \
block              28 fs/xfs/libxfs/xfs_rmap_btree.h 		((char *)(block) + XFS_RMAP_BLOCK_LEN + \
block              31 fs/xfs/libxfs/xfs_rmap_btree.h #define XFS_RMAP_HIGH_KEY_ADDR(block, index) \
block              33 fs/xfs/libxfs/xfs_rmap_btree.h 		((char *)(block) + XFS_RMAP_BLOCK_LEN + \
block              37 fs/xfs/libxfs/xfs_rmap_btree.h #define XFS_RMAP_PTR_ADDR(block, index, maxrecs) \
block              39 fs/xfs/libxfs/xfs_rmap_btree.h 		((char *)(block) + XFS_RMAP_BLOCK_LEN + \
block              57 fs/xfs/libxfs/xfs_rtbitmap.c 	xfs_rtblock_t	block,		/* block number in bitmap or summary */
block              69 fs/xfs/libxfs/xfs_rtbitmap.c 	error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
block             103 fs/xfs/libxfs/xfs_rtbitmap.c 	xfs_rtblock_t	block;		/* bitmap block number */
block             118 fs/xfs/libxfs/xfs_rtbitmap.c 	block = XFS_BITTOBLOCK(mp, start);
block             119 fs/xfs/libxfs/xfs_rtbitmap.c 	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
block             171 fs/xfs/libxfs/xfs_rtbitmap.c 			error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
block             217 fs/xfs/libxfs/xfs_rtbitmap.c 			error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
block             278 fs/xfs/libxfs/xfs_rtbitmap.c 	xfs_rtblock_t	block;		/* bitmap block number */
block             293 fs/xfs/libxfs/xfs_rtbitmap.c 	block = XFS_BITTOBLOCK(mp, start);
block             294 fs/xfs/libxfs/xfs_rtbitmap.c 	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
block             345 fs/xfs/libxfs/xfs_rtbitmap.c 			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
block             390 fs/xfs/libxfs/xfs_rtbitmap.c 			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
block             541 fs/xfs/libxfs/xfs_rtbitmap.c 	xfs_rtblock_t	block;		/* bitmap block number */
block             554 fs/xfs/libxfs/xfs_rtbitmap.c 	block = XFS_BITTOBLOCK(mp, start);
block             558 fs/xfs/libxfs/xfs_rtbitmap.c 	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
block             603 fs/xfs/libxfs/xfs_rtbitmap.c 			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
block             643 fs/xfs/libxfs/xfs_rtbitmap.c 			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
block             775 fs/xfs/libxfs/xfs_rtbitmap.c 	xfs_rtblock_t	block;		/* bitmap block number */
block             788 fs/xfs/libxfs/xfs_rtbitmap.c 	block = XFS_BITTOBLOCK(mp, start);
block             792 fs/xfs/libxfs/xfs_rtbitmap.c 	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
block             843 fs/xfs/libxfs/xfs_rtbitmap.c 			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
block             889 fs/xfs/libxfs/xfs_rtbitmap.c 			error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
block             379 fs/xfs/scrub/bmap.c 	struct xfs_btree_block	*block;
block             391 fs/xfs/scrub/bmap.c 			block = xfs_btree_get_block(bs->cur, i, &bp);
block             392 fs/xfs/scrub/bmap.c 			owner = be64_to_cpu(block->bb_u.l.bb_owner);
block             133 fs/xfs/scrub/btree.c 	struct xfs_btree_block	*block;
block             137 fs/xfs/scrub/btree.c 	block = xfs_btree_get_block(cur, 0, &bp);
block             138 fs/xfs/scrub/btree.c 	rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
block             180 fs/xfs/scrub/btree.c 	struct xfs_btree_block	*block;
block             184 fs/xfs/scrub/btree.c 	block = xfs_btree_get_block(cur, level, &bp);
block             185 fs/xfs/scrub/btree.c 	key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
block             209 fs/xfs/scrub/btree.c 	key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
block             310 fs/xfs/scrub/btree.c 	struct xfs_btree_block	*block)
block             318 fs/xfs/scrub/btree.c 	xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB);
block             319 fs/xfs/scrub/btree.c 	xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB);
block             320 fs/xfs/scrub/btree.c 	level = xfs_btree_get_level(block);
block             453 fs/xfs/scrub/btree.c 	struct xfs_btree_block	*block)
block             458 fs/xfs/scrub/btree.c 	numrecs = be16_to_cpu(block->bb_numrecs);
block             545 fs/xfs/scrub/btree.c 	struct xfs_btree_block	*block)
block             559 fs/xfs/scrub/btree.c 	xfs_btree_get_keys(cur, block, &block_keys);
block             605 fs/xfs/scrub/btree.c 	struct xfs_btree_block		*block;
block             632 fs/xfs/scrub/btree.c 	error = xchk_btree_get_block(&bs, level, &ptr, &block, &bp);
block             633 fs/xfs/scrub/btree.c 	if (error || !block)
block             639 fs/xfs/scrub/btree.c 		block = xfs_btree_get_block(cur, level, &bp);
block             644 fs/xfs/scrub/btree.c 			    be16_to_cpu(block->bb_numrecs)) {
block             645 fs/xfs/scrub/btree.c 				xchk_btree_block_keys(&bs, level, block);
block             656 fs/xfs/scrub/btree.c 			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
block             669 fs/xfs/scrub/btree.c 		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
block             670 fs/xfs/scrub/btree.c 			xchk_btree_block_keys(&bs, level, block);
block             681 fs/xfs/scrub/btree.c 		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
block             687 fs/xfs/scrub/btree.c 		error = xchk_btree_get_block(&bs, level, pp, &block, &bp);
block             688 fs/xfs/scrub/btree.c 		if (error || !block)
block            1138 fs/xfs/xfs_aops.c 	sector_t		block)
block            1155 fs/xfs/xfs_aops.c 	return iomap_bmap(mapping, block, &xfs_iomap_ops);
block              62 fs/xfs/xfs_bmap_util.c 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
block              65 fs/xfs/xfs_bmap_util.c 		block << (mp->m_super->s_blocksize_bits - 9),
block             238 fs/xfs/xfs_bmap_util.c 	struct xfs_btree_block	*block,
block             246 fs/xfs/xfs_bmap_util.c 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
block             271 fs/xfs/xfs_bmap_util.c 	struct xfs_btree_block	*block, *nextblock;
block             279 fs/xfs/xfs_bmap_util.c 	block = XFS_BUF_TO_BLOCK(bp);
block             283 fs/xfs/xfs_bmap_util.c 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
block             297 fs/xfs/xfs_bmap_util.c 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
block             311 fs/xfs/xfs_bmap_util.c 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
block             312 fs/xfs/xfs_bmap_util.c 			numrecs = be16_to_cpu(block->bb_numrecs);
block             314 fs/xfs/xfs_bmap_util.c 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
block             325 fs/xfs/xfs_bmap_util.c 			block = XFS_BUF_TO_BLOCK(bp);
block             345 fs/xfs/xfs_bmap_util.c 	struct xfs_btree_block	*block;	/* current btree block */
block             373 fs/xfs/xfs_bmap_util.c 		block = ifp->if_broot;
block             374 fs/xfs/xfs_bmap_util.c 		level = be16_to_cpu(block->bb_level);
block             376 fs/xfs/xfs_bmap_util.c 		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
block             465 fs/xfs/xfs_log_priv.h xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
block             470 fs/xfs/xfs_log_priv.h 	*block = BLOCK_LSN(val);
block             477 fs/xfs/xfs_log_priv.h xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
block             479 fs/xfs/xfs_log_priv.h 	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
block            1556 fs/xfs/xfs_log_recover.c 	int			block,
block            1567 fs/xfs/xfs_log_recover.c 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
block             104 fs/xfs/xfs_rtalloc.h 		  xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
block             321 fs/xfs/xfs_sysfs.c 	int block;
block             326 fs/xfs/xfs_sysfs.c 	block = log->l_curr_block;
block             329 fs/xfs/xfs_sysfs.c 	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
block             339 fs/xfs/xfs_sysfs.c 	int block;
block             342 fs/xfs/xfs_sysfs.c 	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
block             343 fs/xfs/xfs_sysfs.c 	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
block            3275 fs/xfs/xfs_trace.h 		__field(xfs_daddr_t, block)
block            3284 fs/xfs/xfs_trace.h 		__entry->block = fsmap->fmr_physical;
block            3293 fs/xfs/xfs_trace.h 		  __entry->block,
block              21 include/crypto/md5.h 	u32 block[MD5_BLOCK_WORDS];
block              95 include/drm/drm_displayid.h #define for_each_displayid_db(displayid, block, idx, length) \
block              96 include/drm/drm_displayid.h 	for ((block) = (struct displayid_block *)&(displayid)[idx]; \
block              98 include/drm/drm_displayid.h 	     (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
block              99 include/drm/drm_displayid.h 	     (block)->num_bytes > 0; \
block             100 include/drm/drm_displayid.h 	     (idx) += (block)->num_bytes + sizeof(struct displayid_block), \
block             101 include/drm/drm_displayid.h 	     (block) = (struct displayid_block *)&(displayid)[idx])
block             472 include/drm/drm_edid.h 	int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
block             495 include/drm/drm_edid.h bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
block            1123 include/linux/ata.h static inline bool lba_28_ok(u64 block, u32 n_block)
block            1126 include/linux/ata.h 	return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= ATA_MAX_SECTORS);
block            1129 include/linux/ata.h static inline bool lba_48_ok(u64 block, u32 n_block)
block            1132 include/linux/ata.h 	return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= ATA_MAX_SECTORS_LBA48);
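The lba_28_ok()/lba_48_ok() entries above contain their full bodies: a request is acceptable if its last sector stays inside the 28-bit (or 48-bit) LBA space and the sector count stays within the transfer limit. Below is a self-contained sketch of the same checks; the transfer limit is passed as a parameter instead of using the kernel's ATA_MAX_SECTORS/ATA_MAX_SECTORS_LBA48 constants.

/*
 * Sketch of the LBA range checks shown above; max_sectors stands in for
 * the kernel's ATA_MAX_SECTORS / ATA_MAX_SECTORS_LBA48 limits.
 */
#include <stdbool.h>
#include <stdint.h>

static bool lba28_fits(uint64_t block, uint32_t n_block, uint32_t max_sectors)
{
	/* ending sector must stay below the 28-bit boundary */
	return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= max_sectors);
}

static bool lba48_fits(uint64_t block, uint32_t n_block, uint32_t max_sectors)
{
	/* last addressed sector must fit in 48 bits */
	return ((block + n_block - 1) < ((uint64_t)1 << 48)) && (n_block <= max_sectors);
}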
block            1231 include/linux/blkdev.h static inline int sb_issue_discard(struct super_block *sb, sector_t block,
block            1235 include/linux/blkdev.h 				    block << (sb->s_blocksize_bits -
block            1241 include/linux/blkdev.h static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
block            1245 include/linux/blkdev.h 				    block << (sb->s_blocksize_bits -
block             175 include/linux/buffer_head.h void clean_bdev_aliases(struct block_device *bdev, sector_t block,
block             185 include/linux/buffer_head.h struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
block             187 include/linux/buffer_head.h struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
block             191 include/linux/buffer_head.h void __breadahead(struct block_device *, sector_t block, unsigned int size);
block             192 include/linux/buffer_head.h void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
block             195 include/linux/buffer_head.h 				sector_t block, unsigned size, gfp_t gfp);
block             307 include/linux/buffer_head.h sb_bread(struct super_block *sb, sector_t block)
block             309 include/linux/buffer_head.h 	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
block             313 include/linux/buffer_head.h sb_bread_unmovable(struct super_block *sb, sector_t block)
block             315 include/linux/buffer_head.h 	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
block             319 include/linux/buffer_head.h sb_breadahead(struct super_block *sb, sector_t block)
block             321 include/linux/buffer_head.h 	__breadahead(sb->s_bdev, block, sb->s_blocksize);
block             325 include/linux/buffer_head.h sb_breadahead_unmovable(struct super_block *sb, sector_t block)
block             327 include/linux/buffer_head.h 	__breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
block             331 include/linux/buffer_head.h sb_getblk(struct super_block *sb, sector_t block)
block             333 include/linux/buffer_head.h 	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
block             338 include/linux/buffer_head.h sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
block             340 include/linux/buffer_head.h 	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
block             344 include/linux/buffer_head.h sb_find_get_block(struct super_block *sb, sector_t block)
block             346 include/linux/buffer_head.h 	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
block             350 include/linux/buffer_head.h map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
block             354 include/linux/buffer_head.h 	bh->b_blocknr = block;
block             378 include/linux/buffer_head.h 						   sector_t block,
block             381 include/linux/buffer_head.h 	return __getblk_gfp(bdev, block, size, 0);
block             385 include/linux/buffer_head.h 					   sector_t block,
block             388 include/linux/buffer_head.h 	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
block             402 include/linux/buffer_head.h __bread(struct block_device *bdev, sector_t block, unsigned size)
block             404 include/linux/buffer_head.h 	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
block              55 include/linux/dm-bufio.h void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
block              62 include/linux/dm-bufio.h void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
block              69 include/linux/dm-bufio.h void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
block              78 include/linux/dm-bufio.h 		       sector_t block, unsigned n_blocks);
block             132 include/linux/dm-bufio.h void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
block            1505 include/linux/efi.h 			  bool block, unsigned long size, void *data);
block             122 include/linux/exportfs.h  			u32 block;
block             127 include/linux/iio/buffer-dma.h 		struct iio_dma_buffer_block *block);
block             131 include/linux/iio/buffer-dma.h void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
block            1280 include/linux/jbd2.h 			      unsigned long *block);
block            1281 include/linux/jbd2.h int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
block            1282 include/linux/jbd2.h void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
block             215 include/linux/lockd/lockd.h void		  nlmclnt_finish_block(struct nlm_wait *block);
block             216 include/linux/lockd/lockd.h int		  nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
block              65 include/linux/lockd/xdr.h 	u32			block;
block             156 include/linux/mfd/stmpe.h 			     enum stmpe_block block);
block             148 include/linux/mfd/tc3589x.h 	unsigned int block;
block              49 include/linux/mtd/blktrans.h 		    unsigned long block, char *buffer);
block              51 include/linux/mtd/blktrans.h 		     unsigned long block, char *buffer);
block              53 include/linux/mtd/blktrans.h 		       unsigned long block, unsigned nr_blocks);
block              51 include/linux/mtd/inftl.h int INFTL_formatblock(struct INFTLrecord *s, int block);
block             728 include/linux/mtd/nand.h int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
block              42 include/linux/mtd/nftl.h int NFTL_formatblock(struct NFTLrecord *s, int block);
block             227 include/linux/mtd/onenand.h loff_t onenand_addr(struct onenand_chip *this, int block);
block             109 include/linux/n_r3964.h 	  struct r3964_block_header *block;
block             557 include/linux/nfs_xdr.h 	unsigned char		block : 1;
block            1643 include/linux/platform_data/cros_ec_commands.h 	uint8_t block[EC_VBNV_BLOCK_SIZE];
block            1647 include/linux/platform_data/cros_ec_commands.h 	uint8_t block[EC_VBNV_BLOCK_SIZE];
block             502 include/linux/suspend.h extern bool pm_get_wakeup_count(unsigned int *count, bool block);
block             142 include/linux/thunderbolt.h struct tb_property_dir *tb_property_parse_dir(const u32 *block,
block             144 include/linux/thunderbolt.h ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
block             278 include/media/cec.h 		    bool block);
block             280 include/media/cec.h 		     bool block);
block             286 include/media/cec.h 		     bool block);
block             389 include/media/cec.h 				   bool block)
block             499 include/media/cec.h 	unsigned int block;
block             516 include/media/cec.h 	for (block = 1; block < blocks; block++) {
block             517 include/media/cec.h 		unsigned int offset = block * 128;
block             293 include/net/flow_offload.h 	struct flow_block *block;
block             318 include/net/flow_offload.h struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
block            5486 include/net/mac80211.h 			       struct ieee80211_sta *pubsta, bool block);
block              39 include/net/pkt_cls.h struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
block              42 include/net/pkt_cls.h struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
block              46 include/net/pkt_cls.h void tcf_block_netif_keep_dst(struct tcf_block *block);
block              53 include/net/pkt_cls.h void tcf_block_put(struct tcf_block *block);
block              54 include/net/pkt_cls.h void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
block              57 include/net/pkt_cls.h static inline bool tcf_block_shared(struct tcf_block *block)
block              59 include/net/pkt_cls.h 	return block->index;
block              62 include/net/pkt_cls.h static inline bool tcf_block_non_null_shared(struct tcf_block *block)
block              64 include/net/pkt_cls.h 	return block && block->index;
block              67 include/net/pkt_cls.h static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
block              69 include/net/pkt_cls.h 	WARN_ON(tcf_block_shared(block));
block              70 include/net/pkt_cls.h 	return block->q;
block              77 include/net/pkt_cls.h static inline bool tcf_block_shared(struct tcf_block *block)
block              82 include/net/pkt_cls.h static inline bool tcf_block_non_null_shared(struct tcf_block *block)
block             103 include/net/pkt_cls.h static inline void tcf_block_put(struct tcf_block *block)
block             108 include/net/pkt_cls.h void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
block             113 include/net/pkt_cls.h static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
block             119 include/net/pkt_cls.h int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
block             126 include/net/pkt_cls.h void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
block             158 include/net/pkt_cls.h 	struct Qdisc *q = tp->chain->block->q;
block             182 include/net/pkt_cls.h 	struct Qdisc *q = tp->chain->block->q;
block             515 include/net/pkt_cls.h int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
block             517 include/net/pkt_cls.h int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
block             520 include/net/pkt_cls.h int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
block             525 include/net/pkt_cls.h int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
block             528 include/net/pkt_cls.h int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
block             392 include/net/sch_generic.h 	struct tcf_block *block;
block              75 include/soc/at91/atmel_tcb.h extern struct atmel_tc *atmel_tc_alloc(unsigned block);
block             169 include/sound/emux_synth.h 	void *block;		/* sample block pointer (optional) */
block             535 include/sound/gus.h int snd_gf1_mem_xfree(struct snd_gf1_mem * alloc, struct snd_gf1_mem_block * block);
block             553 include/sound/gus.h 			       struct snd_gf1_dma_block * block,
block              14 include/sound/pcm_oss.h 		     block:1,
block              37 include/sound/soundfont.h 	struct snd_util_memblk *block;	/* allocated data block */
block              28 include/sound/util_mem.h 	struct list_head block;		/* block linked-list header */
block             946 include/trace/events/afs.h 		     unsigned int block,
block             952 include/trace/events/afs.h 	    TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name),
block             959 include/trace/events/afs.h 		    __field(unsigned int,		block		)
block             973 include/trace/events/afs.h 		    __entry->block	= block;
block             985 include/trace/events/afs.h 		      __entry->block, __entry->slot,
block             268 include/trace/events/bcache.h 		__field(unsigned,	block			)
block             274 include/trace/events/bcache.h 		__entry->block	= b->written;
block               3 include/trace/events/block.h #define TRACE_SYSTEM block
block            2017 include/trace/events/btrfs.h 		__field(	u64,	block		)
block            2027 include/trace/events/btrfs.h 		__entry->block		= eb->start;
block            2038 include/trace/events/btrfs.h 		__entry->block, __entry->generation,
block            2061 include/trace/events/btrfs.h 		__field(	u64,	block		)
block            2068 include/trace/events/btrfs.h 		__entry->block		= eb->start;
block            2075 include/trace/events/btrfs.h 		__entry->block, __entry->generation,
block             680 include/trace/events/ext4.h 		 unsigned long long block, unsigned int count),
block             682 include/trace/events/ext4.h 	TP_ARGS(pa, block, count),
block             687 include/trace/events/ext4.h 		__field(	__u64,	block			)
block             695 include/trace/events/ext4.h 		__entry->block		= block;
block             702 include/trace/events/ext4.h 		  __entry->block, __entry->count)
block             811 include/trace/events/ext4.h 	TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block),
block             813 include/trace/events/ext4.h 	TP_ARGS(ar, block),
block             818 include/trace/events/ext4.h 		__field(	__u64,	block			)
block             832 include/trace/events/ext4.h 		__entry->block	= block;
block             847 include/trace/events/ext4.h 		  __entry->len, __entry->block, __entry->logical,
block             853 include/trace/events/ext4.h 	TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
block             856 include/trace/events/ext4.h 	TP_ARGS(inode, block, count, flags),
block             861 include/trace/events/ext4.h 		__field(	__u64,	block			)
block             870 include/trace/events/ext4.h 		__entry->block		= block;
block             879 include/trace/events/ext4.h 		  __entry->mode, __entry->block, __entry->count,
block            1139 include/trace/events/ext4.h 	TP_PROTO(struct inode *inode, int is_metadata, __u64 block),
block            1141 include/trace/events/ext4.h 	TP_ARGS(inode, is_metadata, block),
block            1146 include/trace/events/ext4.h 		__field(	__u64,	block			)
block            1154 include/trace/events/ext4.h 		__entry->block	= block;
block            1162 include/trace/events/ext4.h 		  __entry->mode, __entry->is_metadata, __entry->block)
block            2634 include/trace/events/ext4.h 		__field(u64, block)
block            2642 include/trace/events/ext4.h 		__entry->block = fsmap->fmr_physical;
block            2650 include/trace/events/ext4.h 		  __entry->block,
block             169 include/trace/events/nilfs2.h 		     unsigned long block),
block             171 include/trace/events/nilfs2.h 	    TP_ARGS(inode, ino, block),
block             176 include/trace/events/nilfs2.h 		    __field(unsigned long, block)
block             182 include/trace/events/nilfs2.h 		    __entry->block = block;
block             188 include/trace/events/nilfs2.h 		      __entry->block)
block              58 include/trace/events/smbus.h 			__entry->len = data->block[0] + 1;
block              60 include/trace/events/smbus.h 			memcpy(__entry->buf, data->block, __entry->len);
block             167 include/trace/events/smbus.h 			__entry->len = data->block[0] + 1;
block             169 include/trace/events/smbus.h 			memcpy(__entry->buf, data->block, __entry->len);
block             732 include/uapi/linux/fuse.h 	uint64_t	block;
block             738 include/uapi/linux/fuse.h 	uint64_t	block;
block             138 include/uapi/linux/i2c.h 	__u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */
block             203 include/uapi/linux/rio_mport_cdev.h 	__u64 block;	/* Pointer to array of <count> transfers */
block            1889 include/uapi/linux/videodev2.h 	__u8	block;
block             431 kernel/power/snapshot.c 	struct rtree_node *node, *block, **dst;
block             457 kernel/power/snapshot.c 	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
block             458 kernel/power/snapshot.c 	if (!block)
block             483 kernel/power/snapshot.c 	*dst = block;
block              94 lib/xz/xz_dec_stream.c 	} block;
block             232 lib/xz/xz_dec_stream.c 	s->block.compressed += b->in_pos - s->in_start;
block             233 lib/xz/xz_dec_stream.c 	s->block.uncompressed += b->out_pos - s->out_start;
block             239 lib/xz/xz_dec_stream.c 	if (s->block.compressed > s->block_header.compressed
block             240 lib/xz/xz_dec_stream.c 			|| s->block.uncompressed
block             251 lib/xz/xz_dec_stream.c 					!= s->block.compressed)
block             256 lib/xz/xz_dec_stream.c 					!= s->block.uncompressed)
block             259 lib/xz/xz_dec_stream.c 		s->block.hash.unpadded += s->block_header.size
block             260 lib/xz/xz_dec_stream.c 				+ s->block.compressed;
block             263 lib/xz/xz_dec_stream.c 		s->block.hash.unpadded += check_sizes[s->check_type];
block             266 lib/xz/xz_dec_stream.c 			s->block.hash.unpadded += 4;
block             269 lib/xz/xz_dec_stream.c 		s->block.hash.uncompressed += s->block.uncompressed;
block             270 lib/xz/xz_dec_stream.c 		s->block.hash.crc32 = xz_crc32(
block             271 lib/xz/xz_dec_stream.c 				(const uint8_t *)&s->block.hash,
block             272 lib/xz/xz_dec_stream.c 				sizeof(s->block.hash), s->block.hash.crc32);
block             274 lib/xz/xz_dec_stream.c 		++s->block.count;
block             316 lib/xz/xz_dec_stream.c 			if (s->index.count != s->block.count)
block             543 lib/xz/xz_dec_stream.c 	s->block.compressed = 0;
block             544 lib/xz/xz_dec_stream.c 	s->block.uncompressed = 0;
block             642 lib/xz/xz_dec_stream.c 			while (s->block.compressed & 3) {
block             649 lib/xz/xz_dec_stream.c 				++s->block.compressed;
block             696 lib/xz/xz_dec_stream.c 			if (!memeq(&s->block.hash, &s->index.hash,
block             697 lib/xz/xz_dec_stream.c 					sizeof(s->block.hash)))
block             822 lib/xz/xz_dec_stream.c 	memzero(&s->block, sizeof(s->block));
block             193 mm/page_io.c   			sector_t block;
block             195 mm/page_io.c   			block = bmap(inode, probe_block + block_in_page);
block             196 mm/page_io.c   			if (block == 0)
block             198 mm/page_io.c   			if (block != first_block + block_in_page) {
block             336 mm/percpu.c    static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
block             345 mm/percpu.c    	if (block->scan_hint &&
block             346 mm/percpu.c    	    block->contig_hint_start > block->scan_hint_start &&
block             347 mm/percpu.c    	    alloc_bits > block->scan_hint)
block             348 mm/percpu.c    		return block->scan_hint_start + block->scan_hint;
block             350 mm/percpu.c    	return block->first_free;
block             369 mm/percpu.c    	struct pcpu_block_md *block;
block             372 mm/percpu.c    	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
block             373 mm/percpu.c    	     block++, i++) {
block             376 mm/percpu.c    			*bits += block->left_free;
block             377 mm/percpu.c    			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
block             390 mm/percpu.c    		*bits = block->contig_hint;
block             391 mm/percpu.c    		if (*bits && block->contig_hint_start >= block_off &&
block             392 mm/percpu.c    		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
block             394 mm/percpu.c    					block->contig_hint_start);
block             400 mm/percpu.c    		*bits = block->right_free;
block             401 mm/percpu.c    		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
block             424 mm/percpu.c    	struct pcpu_block_md *block;
block             427 mm/percpu.c    	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
block             428 mm/percpu.c    	     block++, i++) {
block             431 mm/percpu.c    			*bits += block->left_free;
block             434 mm/percpu.c    			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
block             439 mm/percpu.c    		*bits = ALIGN(block->contig_hint_start, align) -
block             440 mm/percpu.c    			block->contig_hint_start;
block             445 mm/percpu.c    		if (block->contig_hint &&
block             446 mm/percpu.c    		    block->contig_hint_start >= block_off &&
block             447 mm/percpu.c    		    block->contig_hint >= *bits + alloc_bits) {
block             448 mm/percpu.c    			int start = pcpu_next_hint(block, alloc_bits);
block             450 mm/percpu.c    			*bits += alloc_bits + block->contig_hint_start -
block             458 mm/percpu.c    		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
block             603 mm/percpu.c    static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
block             607 mm/percpu.c    	block->first_free = min(block->first_free, start);
block             609 mm/percpu.c    		block->left_free = contig;
block             611 mm/percpu.c    	if (end == block->nr_bits)
block             612 mm/percpu.c    		block->right_free = contig;
block             614 mm/percpu.c    	if (contig > block->contig_hint) {
block             616 mm/percpu.c    		if (start > block->contig_hint_start) {
block             617 mm/percpu.c    			if (block->contig_hint > block->scan_hint) {
block             618 mm/percpu.c    				block->scan_hint_start =
block             619 mm/percpu.c    					block->contig_hint_start;
block             620 mm/percpu.c    				block->scan_hint = block->contig_hint;
block             621 mm/percpu.c    			} else if (start < block->scan_hint_start) {
block             627 mm/percpu.c    				block->scan_hint = 0;
block             630 mm/percpu.c    			block->scan_hint = 0;
block             632 mm/percpu.c    		block->contig_hint_start = start;
block             633 mm/percpu.c    		block->contig_hint = contig;
block             634 mm/percpu.c    	} else if (contig == block->contig_hint) {
block             635 mm/percpu.c    		if (block->contig_hint_start &&
block             637 mm/percpu.c    		     __ffs(start) > __ffs(block->contig_hint_start))) {
block             639 mm/percpu.c    			block->contig_hint_start = start;
block             640 mm/percpu.c    			if (start < block->scan_hint_start &&
block             641 mm/percpu.c    			    block->contig_hint > block->scan_hint)
block             642 mm/percpu.c    				block->scan_hint = 0;
block             643 mm/percpu.c    		} else if (start > block->scan_hint_start ||
block             644 mm/percpu.c    			   block->contig_hint > block->scan_hint) {
block             650 mm/percpu.c    			block->scan_hint_start = start;
block             651 mm/percpu.c    			block->scan_hint = contig;
block             659 mm/percpu.c    		if ((start < block->contig_hint_start &&
block             660 mm/percpu.c    		     (contig > block->scan_hint ||
block             661 mm/percpu.c    		      (contig == block->scan_hint &&
block             662 mm/percpu.c    		       start > block->scan_hint_start)))) {
block             663 mm/percpu.c    			block->scan_hint_start = start;
block             664 mm/percpu.c    			block->scan_hint = contig;
block             691 mm/percpu.c    	struct pcpu_block_md *block;
block             697 mm/percpu.c    	block = chunk->md_blocks + s_index;
block             703 mm/percpu.c    	pcpu_block_update(block, s_off, e_off);
block             750 mm/percpu.c    	struct pcpu_block_md *block = chunk->md_blocks + index;
block             755 mm/percpu.c    	if (block->scan_hint) {
block             756 mm/percpu.c    		start = block->scan_hint_start + block->scan_hint;
block             757 mm/percpu.c    		block->contig_hint_start = block->scan_hint_start;
block             758 mm/percpu.c    		block->contig_hint = block->scan_hint;
block             759 mm/percpu.c    		block->scan_hint = 0;
block             761 mm/percpu.c    		start = block->first_free;
block             762 mm/percpu.c    		block->contig_hint = 0;
block             765 mm/percpu.c    	block->right_free = 0;
block             770 mm/percpu.c    		pcpu_block_update(block, rs, re);
block             789 mm/percpu.c    	struct pcpu_block_md *s_block, *e_block, *block;
block             882 mm/percpu.c    		for (block = s_block + 1; block < e_block; block++) {
block             883 mm/percpu.c    			block->scan_hint = 0;
block             884 mm/percpu.c    			block->contig_hint = 0;
block             885 mm/percpu.c    			block->left_free = 0;
block             886 mm/percpu.c    			block->right_free = 0;
block             935 mm/percpu.c    	struct pcpu_block_md *s_block, *e_block, *block;
block            1001 mm/percpu.c    		for (block = s_block + 1; block < e_block; block++) {
block            1002 mm/percpu.c    			block->first_free = 0;
block            1003 mm/percpu.c    			block->scan_hint = 0;
block            1004 mm/percpu.c    			block->contig_hint_start = 0;
block            1005 mm/percpu.c    			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
block            1006 mm/percpu.c    			block->left_free = PCPU_BITMAP_BLOCK_BITS;
block            1007 mm/percpu.c    			block->right_free = PCPU_BITMAP_BLOCK_BITS;
block            1272 mm/percpu.c    static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
block            1274 mm/percpu.c    	block->scan_hint = 0;
block            1275 mm/percpu.c    	block->contig_hint = nr_bits;
block            1276 mm/percpu.c    	block->left_free = nr_bits;
block            1277 mm/percpu.c    	block->right_free = nr_bits;
block            1278 mm/percpu.c    	block->first_free = 0;
block            1279 mm/percpu.c    	block->nr_bits = nr_bits;
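The mm/percpu.c entries above reference a per-block metadata record (first_free, scan_hint, contig_hint, left_free, right_free, nr_bits) and pcpu_init_md_block() shows its initial state. The sketch below is an assumption modeled on those referenced fields, not the kernel's actual struct pcpu_block_md definition; the init mirrors the listed lines, where a fresh block starts as one contiguous free run spanning all nr_bits.

/*
 * Illustrative per-block metadata, assumed from the fields referenced in
 * the mm/percpu.c lines above (the real struct pcpu_block_md is defined
 * elsewhere in the kernel).
 */
struct block_md_sketch {
	int scan_hint;		/* length of a smaller best-fit candidate run */
	int scan_hint_start;	/* start offset of that run */
	int contig_hint;	/* length of the largest known free run */
	int contig_hint_start;	/* start offset of that run */
	int left_free;		/* free bits touching the block's left edge */
	int right_free;		/* free bits touching the block's right edge */
	int first_free;		/* offset of the first free bit */
	int nr_bits;		/* total bits managed by this block */
};

static void block_md_sketch_init(struct block_md_sketch *block, int nr_bits)
{
	/* mirrors pcpu_init_md_block(): the whole block is one free run */
	block->scan_hint = 0;
	block->scan_hint_start = 0;
	block->contig_hint = nr_bits;
	block->contig_hint_start = 0;
	block->left_free = nr_bits;
	block->right_free = nr_bits;
	block->first_free = 0;
	block->nr_bits = nr_bits;
}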
block             382 mm/slob.c      static void slob_free(void *block, int size)
block             385 mm/slob.c      	slob_t *prev, *next, *b = (slob_t *)block;
block             390 mm/slob.c      	if (unlikely(ZERO_OR_NULL_PTR(block)))
block             394 mm/slob.c      	sp = virt_to_page(block);
block             536 mm/slob.c      void kfree(const void *block)
block             540 mm/slob.c      	trace_kfree(_RET_IP_, block);
block             542 mm/slob.c      	if (unlikely(ZERO_OR_NULL_PTR(block)))
block             544 mm/slob.c      	kmemleak_free(block);
block             546 mm/slob.c      	sp = virt_to_page(block);
block             549 mm/slob.c      		unsigned int *m = (unsigned int *)(block - align);
block             562 mm/slob.c      size_t __ksize(const void *block)
block             568 mm/slob.c      	BUG_ON(!block);
block             569 mm/slob.c      	if (unlikely(block == ZERO_SIZE_PTR))
block             572 mm/slob.c      	sp = virt_to_page(block);
block             577 mm/slob.c      	m = (unsigned int *)(block - align);
block             280 net/can/j1939/j1939-priv.h 		unsigned int block;
block             869 net/can/j1939/transport.c 	len = min3(len, session->pkt.block, j1939_tp_block ?: 255);
block            1364 net/can/j1939/transport.c 	else if (dat[1] > session->pkt.block /* 0xff for etp */)
block            1561 net/can/j1939/transport.c 	session->pkt.block = 0xff;
block            1568 net/can/j1939/transport.c 		session->pkt.block = min(dat[3], dat[4]);
block            1886 net/can/j1939/transport.c 	session->pkt.block = skcb->addr.type == J1939_ETP ? 255 :
block             198 net/core/flow_offload.c struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
block             203 net/core/flow_offload.c 	list_for_each_entry(block_cb, &block->cb_list, list) {
block             273 net/core/flow_offload.c 		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
block            1019 net/dsa/slave.c 		block_cb = flow_block_cb_lookup(f->block, cb, dev);
block            1309 net/ipv6/addrconf.c 				bool block)
block            1413 net/ipv6/addrconf.c 	ift = ipv6_add_addr(idev, &cfg, block, NULL);
block              57 net/mac80211/michael.c 	size_t block, blocks, left;
block              66 net/mac80211/michael.c 	for (block = 0; block < blocks; block++)
block              67 net/mac80211/michael.c 		michael_block(&mctx, get_unaligned_le32(&data[block * 4]));
block            1804 net/mac80211/sta_info.c 			       struct ieee80211_sta *pubsta, bool block)
block            1808 net/mac80211/sta_info.c 	trace_api_sta_block_awake(sta->local, pubsta, block);
block            1810 net/mac80211/sta_info.c 	if (block) {
block            2153 net/mac80211/trace.h 		 struct ieee80211_sta *sta, bool block),
block            2155 net/mac80211/trace.h 	TP_ARGS(local, sta, block),
block            2160 net/mac80211/trace.h 		__field(bool, block)
block            2166 net/mac80211/trace.h 		__entry->block = block;
block            2171 net/mac80211/trace.h 		LOCAL_PR_ARG, STA_PR_ARG, __entry->block
block             239 net/netfilter/nf_tables_offload.c 	bo.block = &chain->flow_block;
block             265 net/netfilter/nf_tables_offload.c 	bo.block = &chain->flow_block;
block             284 net/netfilter/nf_tables_offload.c 	bo.block = &chain->flow_block;
block              75 net/sched/act_api.c 		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
block              60 net/sched/cls_api.c 	struct tcf_block *block = chain->block;
block              62 net/sched/cls_api.c 	mutex_lock(&block->proto_destroy_lock);
block              63 net/sched/cls_api.c 	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
block              65 net/sched/cls_api.c 	mutex_unlock(&block->proto_destroy_lock);
block              84 net/sched/cls_api.c 	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
block              99 net/sched/cls_api.c 	struct tcf_block *block = chain->block;
block             101 net/sched/cls_api.c 	mutex_lock(&block->proto_destroy_lock);
block             104 net/sched/cls_api.c 	mutex_unlock(&block->proto_destroy_lock);
block             338 net/sched/cls_api.c #define ASSERT_BLOCK_LOCKED(block)					\
block             339 net/sched/cls_api.c 	lockdep_assert_held(&(block)->lock)
block             347 net/sched/cls_api.c static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
block             352 net/sched/cls_api.c 	ASSERT_BLOCK_LOCKED(block);
block             357 net/sched/cls_api.c 	list_add_tail(&chain->list, &block->chain_list);
block             359 net/sched/cls_api.c 	chain->block = block;
block             363 net/sched/cls_api.c 		block->chain0.chain = chain;
block             378 net/sched/cls_api.c 	struct tcf_block *block = chain->block;
block             383 net/sched/cls_api.c 	mutex_lock(&block->lock);
block             384 net/sched/cls_api.c 	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
block             386 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block             393 net/sched/cls_api.c 	struct tcf_block *block = chain->block;
block             395 net/sched/cls_api.c 	ASSERT_BLOCK_LOCKED(block);
block             399 net/sched/cls_api.c 		block->chain0.chain = NULL;
block             401 net/sched/cls_api.c 	if (list_empty(&block->chain_list) &&
block             402 net/sched/cls_api.c 	    refcount_read(&block->refcnt) == 0)
block             408 net/sched/cls_api.c static void tcf_block_destroy(struct tcf_block *block)
block             410 net/sched/cls_api.c 	mutex_destroy(&block->lock);
block             411 net/sched/cls_api.c 	mutex_destroy(&block->proto_destroy_lock);
block             412 net/sched/cls_api.c 	kfree_rcu(block, rcu);
block             417 net/sched/cls_api.c 	struct tcf_block *block = chain->block;
block             422 net/sched/cls_api.c 		tcf_block_destroy(block);
block             427 net/sched/cls_api.c 	ASSERT_BLOCK_LOCKED(chain->block);
block             434 net/sched/cls_api.c 	ASSERT_BLOCK_LOCKED(chain->block);
block             442 net/sched/cls_api.c static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
block             447 net/sched/cls_api.c 	ASSERT_BLOCK_LOCKED(block);
block             449 net/sched/cls_api.c 	list_for_each_entry(chain, &block->chain_list, list) {
block             459 net/sched/cls_api.c static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
block             466 net/sched/cls_api.c 	mutex_lock(&block->lock);
block             467 net/sched/cls_api.c 	chain = tcf_chain_lookup(block, chain_index);
block             473 net/sched/cls_api.c 		chain = tcf_chain_create(block, chain_index);
block             481 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block             495 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block             499 net/sched/cls_api.c static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
block             502 net/sched/cls_api.c 	return __tcf_chain_get(block, chain_index, create, false);
block             505 net/sched/cls_api.c struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
block             507 net/sched/cls_api.c 	return __tcf_chain_get(block, chain_index, true, true);
block             515 net/sched/cls_api.c 				  struct tcf_block *block, struct sk_buff *oskb,
block             521 net/sched/cls_api.c 	struct tcf_block *block = chain->block;
block             527 net/sched/cls_api.c 	mutex_lock(&block->lock);
block             530 net/sched/cls_api.c 			mutex_unlock(&block->lock);
block             550 net/sched/cls_api.c 				       block, NULL, 0, 0, false);
block             557 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block             605 net/sched/cls_api.c static int tcf_block_setup(struct tcf_block *block,
block             608 net/sched/cls_api.c static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
block             618 net/sched/cls_api.c 		.block_shared	= tcf_block_non_null_shared(block),
block             622 net/sched/cls_api.c 	if (!block)
block             625 net/sched/cls_api.c 	bo.block = &block->flow_block;
block             627 net/sched/cls_api.c 	down_write(&block->cb_lock);
block             630 net/sched/cls_api.c 	tcf_block_setup(block, &bo);
block             631 net/sched/cls_api.c 	up_write(&block->cb_lock);
block             671 net/sched/cls_api.c 	struct tcf_block *block;
block             673 net/sched/cls_api.c 	block = tc_dev_block(dev, true);
block             674 net/sched/cls_api.c 	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);
block             676 net/sched/cls_api.c 	block = tc_dev_block(dev, false);
block             677 net/sched/cls_api.c 	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
block             680 net/sched/cls_api.c static void tc_indr_block_call(struct tcf_block *block,
block             690 net/sched/cls_api.c 		.block		= &block->flow_block,
block             691 net/sched/cls_api.c 		.block_shared	= tcf_block_shared(block),
block             697 net/sched/cls_api.c 	tcf_block_setup(block, &bo);
block             700 net/sched/cls_api.c static bool tcf_block_offload_in_use(struct tcf_block *block)
block             702 net/sched/cls_api.c 	return atomic_read(&block->offloadcnt);
block             705 net/sched/cls_api.c static int tcf_block_offload_cmd(struct tcf_block *block,
block             717 net/sched/cls_api.c 	bo.block = &block->flow_block;
block             718 net/sched/cls_api.c 	bo.block_shared = tcf_block_shared(block);
block             726 net/sched/cls_api.c 	return tcf_block_setup(block, &bo);
block             729 net/sched/cls_api.c static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
block             736 net/sched/cls_api.c 	down_write(&block->cb_lock);
block             743 net/sched/cls_api.c 	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
block             749 net/sched/cls_api.c 	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
block             755 net/sched/cls_api.c 	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
block             756 net/sched/cls_api.c 	up_write(&block->cb_lock);
block             760 net/sched/cls_api.c 	if (tcf_block_offload_in_use(block)) {
block             765 net/sched/cls_api.c 	block->nooffloaddevcnt++;
block             766 net/sched/cls_api.c 	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
block             768 net/sched/cls_api.c 	up_write(&block->cb_lock);
block             772 net/sched/cls_api.c static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
block             778 net/sched/cls_api.c 	down_write(&block->cb_lock);
block             779 net/sched/cls_api.c 	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
block             783 net/sched/cls_api.c 	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
block             786 net/sched/cls_api.c 	up_write(&block->cb_lock);
block             790 net/sched/cls_api.c 	WARN_ON(block->nooffloaddevcnt-- == 0);
block             791 net/sched/cls_api.c 	up_write(&block->cb_lock);
block             795 net/sched/cls_api.c tcf_chain0_head_change_cb_add(struct tcf_block *block,
block             810 net/sched/cls_api.c 	mutex_lock(&block->lock);
block             811 net/sched/cls_api.c 	chain0 = block->chain0.chain;
block             815 net/sched/cls_api.c 		list_add(&item->list, &block->chain0.filter_chain_list);
block             816 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block             827 net/sched/cls_api.c 		mutex_lock(&block->lock);
block             828 net/sched/cls_api.c 		list_add(&item->list, &block->chain0.filter_chain_list);
block             829 net/sched/cls_api.c 		mutex_unlock(&block->lock);
block             839 net/sched/cls_api.c tcf_chain0_head_change_cb_del(struct tcf_block *block,
block             844 net/sched/cls_api.c 	mutex_lock(&block->lock);
block             845 net/sched/cls_api.c 	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
block             849 net/sched/cls_api.c 			if (block->chain0.chain)
block             852 net/sched/cls_api.c 			mutex_unlock(&block->lock);
block             858 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block             869 net/sched/cls_api.c static int tcf_block_insert(struct tcf_block *block, struct net *net,
block             877 net/sched/cls_api.c 	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
block             885 net/sched/cls_api.c static void tcf_block_remove(struct tcf_block *block, struct net *net)
block             890 net/sched/cls_api.c 	idr_remove(&tn->idr, block->index);
block             898 net/sched/cls_api.c 	struct tcf_block *block;
block             900 net/sched/cls_api.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block             901 net/sched/cls_api.c 	if (!block) {
block             905 net/sched/cls_api.c 	mutex_init(&block->lock);
block             906 net/sched/cls_api.c 	mutex_init(&block->proto_destroy_lock);
block             907 net/sched/cls_api.c 	init_rwsem(&block->cb_lock);
block             908 net/sched/cls_api.c 	flow_block_init(&block->flow_block);
block             909 net/sched/cls_api.c 	INIT_LIST_HEAD(&block->chain_list);
block             910 net/sched/cls_api.c 	INIT_LIST_HEAD(&block->owner_list);
block             911 net/sched/cls_api.c 	INIT_LIST_HEAD(&block->chain0.filter_chain_list);
block             913 net/sched/cls_api.c 	refcount_set(&block->refcnt, 1);
block             914 net/sched/cls_api.c 	block->net = net;
block             915 net/sched/cls_api.c 	block->index = block_index;
block             918 net/sched/cls_api.c 	if (!tcf_block_shared(block))
block             919 net/sched/cls_api.c 		block->q = q;
block             920 net/sched/cls_api.c 	return block;
block             932 net/sched/cls_api.c 	struct tcf_block *block;
block             935 net/sched/cls_api.c 	block = tcf_block_lookup(net, block_index);
block             936 net/sched/cls_api.c 	if (block && !refcount_inc_not_zero(&block->refcnt))
block             937 net/sched/cls_api.c 		block = NULL;
block             940 net/sched/cls_api.c 	return block;
block             944 net/sched/cls_api.c __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
block             946 net/sched/cls_api.c 	mutex_lock(&block->lock);
block             948 net/sched/cls_api.c 		chain = list_is_last(&chain->list, &block->chain_list) ?
block             951 net/sched/cls_api.c 		chain = list_first_entry_or_null(&block->chain_list,
block             956 net/sched/cls_api.c 		chain = list_is_last(&chain->list, &block->chain_list) ?
block             961 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block             976 net/sched/cls_api.c tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
block             978 net/sched/cls_api.c 	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
block            1041 net/sched/cls_api.c static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
block            1048 net/sched/cls_api.c 	for (chain = tcf_get_next_chain(block, NULL);
block            1050 net/sched/cls_api.c 	     chain = tcf_get_next_chain(block, chain)) {
block            1160 net/sched/cls_api.c 	struct tcf_block *block;
block            1163 net/sched/cls_api.c 		block = tcf_block_refcnt_get(net, block_index);
block            1164 net/sched/cls_api.c 		if (!block) {
block            1171 net/sched/cls_api.c 		block = cops->tcf_block(q, cl, extack);
block            1172 net/sched/cls_api.c 		if (!block)
block            1175 net/sched/cls_api.c 		if (tcf_block_shared(block)) {
block            1186 net/sched/cls_api.c 		refcount_inc(&block->refcnt);
block            1189 net/sched/cls_api.c 	return block;
block            1192 net/sched/cls_api.c static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
block            1195 net/sched/cls_api.c 	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
block            1202 net/sched/cls_api.c 		bool free_block = list_empty(&block->chain_list);
block            1204 net/sched/cls_api.c 		mutex_unlock(&block->lock);
block            1205 net/sched/cls_api.c 		if (tcf_block_shared(block))
block            1206 net/sched/cls_api.c 			tcf_block_remove(block, block->net);
block            1209 net/sched/cls_api.c 			tcf_block_offload_unbind(block, q, ei);
block            1212 net/sched/cls_api.c 			tcf_block_destroy(block);
block            1214 net/sched/cls_api.c 			tcf_block_flush_all_chains(block, rtnl_held);
block            1216 net/sched/cls_api.c 		tcf_block_offload_unbind(block, q, ei);
block            1220 net/sched/cls_api.c static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
block            1222 net/sched/cls_api.c 	__tcf_block_put(block, NULL, NULL, rtnl_held);
block            1234 net/sched/cls_api.c 	struct tcf_block *block;
block            1247 net/sched/cls_api.c 	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
block            1248 net/sched/cls_api.c 	if (IS_ERR(block)) {
block            1249 net/sched/cls_api.c 		err = PTR_ERR(block);
block            1253 net/sched/cls_api.c 	return block;
block            1263 net/sched/cls_api.c static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
block            1266 net/sched/cls_api.c 	if (!IS_ERR_OR_NULL(block))
block            1267 net/sched/cls_api.c 		tcf_block_refcnt_put(block, rtnl_held);
block            1284 net/sched/cls_api.c tcf_block_owner_netif_keep_dst(struct tcf_block *block,
block            1288 net/sched/cls_api.c 	if (block->keep_dst &&
block            1294 net/sched/cls_api.c void tcf_block_netif_keep_dst(struct tcf_block *block)
block            1298 net/sched/cls_api.c 	block->keep_dst = true;
block            1299 net/sched/cls_api.c 	list_for_each_entry(item, &block->owner_list, list)
block            1300 net/sched/cls_api.c 		tcf_block_owner_netif_keep_dst(block, item->q,
block            1305 net/sched/cls_api.c static int tcf_block_owner_add(struct tcf_block *block,
block            1316 net/sched/cls_api.c 	list_add(&item->list, &block->owner_list);
block            1320 net/sched/cls_api.c static void tcf_block_owner_del(struct tcf_block *block,
block            1326 net/sched/cls_api.c 	list_for_each_entry(item, &block->owner_list, list) {
block            1341 net/sched/cls_api.c 	struct tcf_block *block = NULL;
block            1346 net/sched/cls_api.c 		block = tcf_block_refcnt_get(net, ei->block_index);
block            1348 net/sched/cls_api.c 	if (!block) {
block            1349 net/sched/cls_api.c 		block = tcf_block_create(net, q, ei->block_index, extack);
block            1350 net/sched/cls_api.c 		if (IS_ERR(block))
block            1351 net/sched/cls_api.c 			return PTR_ERR(block);
block            1352 net/sched/cls_api.c 		if (tcf_block_shared(block)) {
block            1353 net/sched/cls_api.c 			err = tcf_block_insert(block, net, extack);
block            1359 net/sched/cls_api.c 	err = tcf_block_owner_add(block, q, ei->binder_type);
block            1363 net/sched/cls_api.c 	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
block            1365 net/sched/cls_api.c 	err = tcf_chain0_head_change_cb_add(block, ei, extack);
block            1369 net/sched/cls_api.c 	err = tcf_block_offload_bind(block, q, ei, extack);
block            1373 net/sched/cls_api.c 	*p_block = block;
block            1377 net/sched/cls_api.c 	tcf_chain0_head_change_cb_del(block, ei);
block            1379 net/sched/cls_api.c 	tcf_block_owner_del(block, q, ei->binder_type);
block            1382 net/sched/cls_api.c 	tcf_block_refcnt_put(block, true);
block            1411 net/sched/cls_api.c void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
block            1414 net/sched/cls_api.c 	if (!block)
block            1416 net/sched/cls_api.c 	tcf_chain0_head_change_cb_del(block, ei);
block            1417 net/sched/cls_api.c 	tcf_block_owner_del(block, q, ei->binder_type);
block            1419 net/sched/cls_api.c 	__tcf_block_put(block, q, ei, true);
block            1423 net/sched/cls_api.c void tcf_block_put(struct tcf_block *block)
block            1427 net/sched/cls_api.c 	if (!block)
block            1429 net/sched/cls_api.c 	tcf_block_put_ext(block, block->q, &ei);
block            1435 net/sched/cls_api.c tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
block            1443 net/sched/cls_api.c 	lockdep_assert_held(&block->cb_lock);
block            1445 net/sched/cls_api.c 	for (chain = __tcf_get_next_chain(block, NULL);
block            1448 net/sched/cls_api.c 		     chain = __tcf_get_next_chain(block, chain),
block            1472 net/sched/cls_api.c 	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
block            1477 net/sched/cls_api.c static int tcf_block_bind(struct tcf_block *block,
block            1483 net/sched/cls_api.c 	lockdep_assert_held(&block->cb_lock);
block            1486 net/sched/cls_api.c 		err = tcf_block_playback_offloads(block, block_cb->cb,
block            1488 net/sched/cls_api.c 						  tcf_block_offload_in_use(block),
block            1493 net/sched/cls_api.c 			block->lockeddevcnt++;
block            1497 net/sched/cls_api.c 	list_splice(&bo->cb_list, &block->flow_block.cb_list);
block            1505 net/sched/cls_api.c 			tcf_block_playback_offloads(block, block_cb->cb,
block            1507 net/sched/cls_api.c 						    tcf_block_offload_in_use(block),
block            1510 net/sched/cls_api.c 				block->lockeddevcnt--;
block            1518 net/sched/cls_api.c static void tcf_block_unbind(struct tcf_block *block,
block            1523 net/sched/cls_api.c 	lockdep_assert_held(&block->cb_lock);
block            1526 net/sched/cls_api.c 		tcf_block_playback_offloads(block, block_cb->cb,
block            1528 net/sched/cls_api.c 					    tcf_block_offload_in_use(block),
block            1533 net/sched/cls_api.c 			block->lockeddevcnt--;
block            1537 net/sched/cls_api.c static int tcf_block_setup(struct tcf_block *block,
block            1544 net/sched/cls_api.c 		err = tcf_block_bind(block, bo);
block            1548 net/sched/cls_api.c 		tcf_block_unbind(block, bo);
block            1612 net/sched/cls_api.c 				       tp->chain->block->index,
block            1782 net/sched/cls_api.c 			 struct tcf_proto *tp, struct tcf_block *block,
block            1803 net/sched/cls_api.c 		tcm->tcm_block_index = block->index;
block            1828 net/sched/cls_api.c 			  struct tcf_block *block, struct Qdisc *q,
block            1840 net/sched/cls_api.c 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
block            1860 net/sched/cls_api.c 			      struct tcf_block *block, struct Qdisc *q,
block            1872 net/sched/cls_api.c 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
block            1900 net/sched/cls_api.c 				 struct tcf_block *block, struct Qdisc *q,
block            1909 net/sched/cls_api.c 		tfilter_notify(net, oskb, n, tp, block,
block            1934 net/sched/cls_api.c 	struct tcf_block *block;
block            1960 net/sched/cls_api.c 	block = NULL;
block            2002 net/sched/cls_api.c 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
block            2004 net/sched/cls_api.c 	if (IS_ERR(block)) {
block            2005 net/sched/cls_api.c 		err = PTR_ERR(block);
block            2008 net/sched/cls_api.c 	block->classid = parent;
block            2016 net/sched/cls_api.c 	chain = tcf_chain_get(block, chain_index, true);
block            2108 net/sched/cls_api.c 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
block            2126 net/sched/cls_api.c 	tcf_block_release(q, block, rtnl_held);
block            2160 net/sched/cls_api.c 	struct tcf_block *block = NULL;
block            2211 net/sched/cls_api.c 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
block            2213 net/sched/cls_api.c 	if (IS_ERR(block)) {
block            2214 net/sched/cls_api.c 		err = PTR_ERR(block);
block            2224 net/sched/cls_api.c 	chain = tcf_chain_get(block, chain_index, false);
block            2239 net/sched/cls_api.c 		tfilter_notify_chain(net, skb, block, q, parent, n,
block            2263 net/sched/cls_api.c 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
block            2278 net/sched/cls_api.c 		err = tfilter_del_notify(net, skb, n, tp, block,
block            2294 net/sched/cls_api.c 	tcf_block_release(q, block, rtnl_held);
block            2320 net/sched/cls_api.c 	struct tcf_block *block = NULL;
block            2367 net/sched/cls_api.c 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
block            2369 net/sched/cls_api.c 	if (IS_ERR(block)) {
block            2370 net/sched/cls_api.c 		err = PTR_ERR(block);
block            2380 net/sched/cls_api.c 	chain = tcf_chain_get(block, chain_index, false);
block            2407 net/sched/cls_api.c 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
block            2420 net/sched/cls_api.c 	tcf_block_release(q, block, rtnl_held);
block            2432 net/sched/cls_api.c 	struct tcf_block *block;
block            2442 net/sched/cls_api.c 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
block            2453 net/sched/cls_api.c 	struct tcf_block *block = chain->block;
block            2476 net/sched/cls_api.c 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
block            2488 net/sched/cls_api.c 		arg.block = block;
block            2515 net/sched/cls_api.c 	struct tcf_block *block;
block            2531 net/sched/cls_api.c 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
block            2532 net/sched/cls_api.c 		if (!block)
block            2567 net/sched/cls_api.c 		block = cops->tcf_block(q, cl, NULL);
block            2568 net/sched/cls_api.c 		if (!block)
block            2570 net/sched/cls_api.c 		parent = block->classid;
block            2571 net/sched/cls_api.c 		if (tcf_block_shared(block))
block            2578 net/sched/cls_api.c 	for (chain = __tcf_get_next_chain(block, NULL);
block            2581 net/sched/cls_api.c 		     chain = __tcf_get_next_chain(block, chain),
block            2595 net/sched/cls_api.c 		tcf_block_refcnt_put(block, true);
block            2608 net/sched/cls_api.c 			      struct tcf_block *block,
block            2628 net/sched/cls_api.c 	if (block->q) {
block            2629 net/sched/cls_api.c 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
block            2630 net/sched/cls_api.c 		tcm->tcm_parent = block->q->handle;
block            2633 net/sched/cls_api.c 		tcm->tcm_block_index = block->index;
block            2659 net/sched/cls_api.c 	struct tcf_block *block = chain->block;
block            2660 net/sched/cls_api.c 	struct net *net = block->net;
block            2669 net/sched/cls_api.c 			       chain->index, net, skb, block, portid,
block            2688 net/sched/cls_api.c 				  struct tcf_block *block, struct sk_buff *oskb,
block            2692 net/sched/cls_api.c 	struct net *net = block->net;
block            2700 net/sched/cls_api.c 			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
block            2769 net/sched/cls_api.c 	struct tcf_block *block;
block            2787 net/sched/cls_api.c 	block = tcf_block_find(net, &q, &parent, &cl,
block            2789 net/sched/cls_api.c 	if (IS_ERR(block))
block            2790 net/sched/cls_api.c 		return PTR_ERR(block);
block            2799 net/sched/cls_api.c 	mutex_lock(&block->lock);
block            2800 net/sched/cls_api.c 	chain = tcf_chain_lookup(block, chain_index);
block            2819 net/sched/cls_api.c 			chain = tcf_chain_create(block, chain_index);
block            2844 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block            2858 net/sched/cls_api.c 		tfilter_notify_chain(net, skb, block, q, parent, n,
block            2882 net/sched/cls_api.c 	tcf_block_release(q, block, true);
block            2889 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block            2899 net/sched/cls_api.c 	struct tcf_block *block;
block            2916 net/sched/cls_api.c 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
block            2917 net/sched/cls_api.c 		if (!block)
block            2954 net/sched/cls_api.c 		block = cops->tcf_block(q, cl, NULL);
block            2955 net/sched/cls_api.c 		if (!block)
block            2957 net/sched/cls_api.c 		if (tcf_block_shared(block))
block            2964 net/sched/cls_api.c 	mutex_lock(&block->lock);
block            2965 net/sched/cls_api.c 	list_for_each_entry(chain, &block->chain_list, list) {
block            2976 net/sched/cls_api.c 					 chain->index, net, skb, block,
block            2984 net/sched/cls_api.c 	mutex_unlock(&block->lock);
block            2987 net/sched/cls_api.c 		tcf_block_refcnt_put(block, true);
block            3126 net/sched/cls_api.c static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
block            3131 net/sched/cls_api.c 	atomic_inc(&block->offloadcnt);
block            3134 net/sched/cls_api.c static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
block            3139 net/sched/cls_api.c 	atomic_dec(&block->offloadcnt);
block            3142 net/sched/cls_api.c static void tc_cls_offload_cnt_update(struct tcf_block *block,
block            3146 net/sched/cls_api.c 	lockdep_assert_held(&block->cb_lock);
block            3151 net/sched/cls_api.c 			tcf_block_offload_inc(block, flags);
block            3156 net/sched/cls_api.c 			tcf_block_offload_dec(block, flags);
block            3162 net/sched/cls_api.c tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
block            3165 net/sched/cls_api.c 	lockdep_assert_held(&block->cb_lock);
block            3168 net/sched/cls_api.c 	tcf_block_offload_dec(block, flags);
block            3174 net/sched/cls_api.c __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
block            3181 net/sched/cls_api.c 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
block            3193 net/sched/cls_api.c int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
block            3196 net/sched/cls_api.c 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
block            3202 net/sched/cls_api.c 	down_read(&block->cb_lock);
block            3207 net/sched/cls_api.c 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
block            3208 net/sched/cls_api.c 		up_read(&block->cb_lock);
block            3213 net/sched/cls_api.c 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
block            3215 net/sched/cls_api.c 	up_read(&block->cb_lock);
block            3228 net/sched/cls_api.c int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
block            3232 net/sched/cls_api.c 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
block            3238 net/sched/cls_api.c 	down_read(&block->cb_lock);
block            3243 net/sched/cls_api.c 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
block            3244 net/sched/cls_api.c 		up_read(&block->cb_lock);
block            3250 net/sched/cls_api.c 	if (block->nooffloaddevcnt && err_stop) {
block            3255 net/sched/cls_api.c 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
block            3262 net/sched/cls_api.c 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
block            3265 net/sched/cls_api.c 	up_read(&block->cb_lock);
block            3278 net/sched/cls_api.c int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
block            3284 net/sched/cls_api.c 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
block            3290 net/sched/cls_api.c 	down_read(&block->cb_lock);
block            3295 net/sched/cls_api.c 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
block            3296 net/sched/cls_api.c 		up_read(&block->cb_lock);
block            3302 net/sched/cls_api.c 	if (block->nooffloaddevcnt && err_stop) {
block            3307 net/sched/cls_api.c 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
block            3311 net/sched/cls_api.c 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
block            3318 net/sched/cls_api.c 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
block            3321 net/sched/cls_api.c 	up_read(&block->cb_lock);
block            3332 net/sched/cls_api.c int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
block            3336 net/sched/cls_api.c 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
block            3342 net/sched/cls_api.c 	down_read(&block->cb_lock);
block            3347 net/sched/cls_api.c 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
block            3348 net/sched/cls_api.c 		up_read(&block->cb_lock);
block            3353 net/sched/cls_api.c 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
block            3355 net/sched/cls_api.c 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
block            3359 net/sched/cls_api.c 	up_read(&block->cb_lock);
block            3366 net/sched/cls_api.c int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
block            3377 net/sched/cls_api.c 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
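The net/sched/cls_api.c entries above all funnel through __tc_setup_cb_call(), which walks every callback bound to a tcf_block, counts how many drivers accepted the request, and optionally stops at the first error. The following is a minimal, self-contained user-space model of that walk-and-count policy only; the demo_* names are illustrative and are not kernel symbols.

/*
 * Simplified model of the __tc_setup_cb_call() pattern indexed above:
 * call every callback bound to a block, count the callbacks that accepted
 * the request, and optionally stop at the first error (err_stop).
 */
#include <stddef.h>

typedef int (*demo_cb)(void *type_data, void *cb_priv);

struct demo_block {
	demo_cb cbs[8];
	void *cb_priv[8];
	int n_cbs;
};

static int demo_setup_cb_call(struct demo_block *block, void *type_data,
			      int err_stop)
{
	int i, err, ok_count = 0;

	for (i = 0; i < block->n_cbs; i++) {
		err = block->cbs[i](type_data, block->cb_priv[i]);
		if (err) {
			if (err_stop)
				return err;	/* first failure wins */
			continue;		/* best effort: keep going */
		}
		ok_count++;
	}
	return ok_count;	/* > 0: at least one driver took the request */
}

A positive return is what lets callers such as the classifier call sites listed below decide whether the filter ended up in hardware at all.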
block             148 net/sched/cls_bpf.c 	struct tcf_block *block = tp->chain->block;
block             166 net/sched/cls_bpf.c 		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
block             172 net/sched/cls_bpf.c 		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
block             176 net/sched/cls_bpf.c 		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
block             229 net/sched/cls_bpf.c 	struct tcf_block *block = tp->chain->block;
block             239 net/sched/cls_bpf.c 	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
block             403 net/sched/cls_bpf.c 		tcf_block_netif_keep_dst(tp->chain->block);
block             669 net/sched/cls_bpf.c 	struct tcf_block *block = tp->chain->block;
block             687 net/sched/cls_bpf.c 		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
block             503 net/sched/cls_flow.c 			struct Qdisc *q = tcf_block_q(tp->chain->block);
block             518 net/sched/cls_flow.c 	tcf_block_netif_keep_dst(tp->chain->block);
block             418 net/sched/cls_flower.c 	struct tcf_block *block = tp->chain->block;
block             425 net/sched/cls_flower.c 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
block             434 net/sched/cls_flower.c 	struct tcf_block *block = tp->chain->block;
block             462 net/sched/cls_flower.c 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
block             481 net/sched/cls_flower.c 	struct tcf_block *block = tp->chain->block;
block             489 net/sched/cls_flower.c 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
block            1787 net/sched/cls_flower.c 	struct tcf_block *block = tp->chain->block;
block            1829 net/sched/cls_flower.c 		err = tc_setup_cb_reoffload(block, tp, add, cb,
block            1875 net/sched/cls_flower.c 	struct tcf_block *block = chain->block;
block            1891 net/sched/cls_flower.c 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
block            1901 net/sched/cls_flower.c 	struct tcf_block *block = chain->block;
block            1907 net/sched/cls_flower.c 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
block              75 net/sched/cls_fw.c 		struct Qdisc *q = tcf_block_q(tp->chain->block);
block              72 net/sched/cls_matchall.c 	struct tcf_block *block = tp->chain->block;
block              78 net/sched/cls_matchall.c 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
block              88 net/sched/cls_matchall.c 	struct tcf_block *block = tp->chain->block;
block             112 net/sched/cls_matchall.c 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
block             290 net/sched/cls_matchall.c 	struct tcf_block *block = tp->chain->block;
block             315 net/sched/cls_matchall.c 	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
block             332 net/sched/cls_matchall.c 	struct tcf_block *block = tp->chain->block;
block             338 net/sched/cls_matchall.c 	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);
block             525 net/sched/cls_route.c 	tcf_block_netif_keep_dst(tp->chain->block);
block             114 net/sched/cls_tcindex.c 		struct Qdisc *q = tcf_block_q(tp->chain->block);
block             321 net/sched/cls_u32.c 	struct tcf_block *block = tp->chain->block;
block             329 net/sched/cls_u32.c 	if (tcf_block_shared(block))
block             330 net/sched/cls_u32.c 		return block;
block             332 net/sched/cls_u32.c 		return block->q;
block             474 net/sched/cls_u32.c 	struct tcf_block *block = tp->chain->block;
block             483 net/sched/cls_u32.c 	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
block             489 net/sched/cls_u32.c 	struct tcf_block *block = tp->chain->block;
block             501 net/sched/cls_u32.c 	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
block             518 net/sched/cls_u32.c 	struct tcf_block *block = tp->chain->block;
block             525 net/sched/cls_u32.c 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
block             533 net/sched/cls_u32.c 	struct tcf_block *block = tp->chain->block;
block             555 net/sched/cls_u32.c 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
block            1177 net/sched/cls_u32.c 	struct tcf_block *block = tp->chain->block;
block            1202 net/sched/cls_u32.c 	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
block             177 net/sched/ematch.c 	struct net *net = tp->chain->block->net;
block            1904 net/sched/sch_api.c 		struct Qdisc *q = tcf_block_q(tp->chain->block);
block            1925 net/sched/sch_api.c 	struct tcf_block *block;
block            1928 net/sched/sch_api.c 	block = cops->tcf_block(q, cl, NULL);
block            1929 net/sched/sch_api.c 	if (!block)
block            1931 net/sched/sch_api.c 	for (chain = tcf_get_next_chain(block, NULL);
block            1933 net/sched/sch_api.c 	     chain = tcf_get_next_chain(block, chain)) {
block              48 net/sched/sch_atm.c 	struct tcf_block	*block;
block             155 net/sched/sch_atm.c 	tcf_block_put(flow->block);
block             288 net/sched/sch_atm.c 	error = tcf_block_get(&flow->block, &flow->filter_list, sch,
block             372 net/sched/sch_atm.c 	return flow ? flow->block : p->link.block;
block             557 net/sched/sch_atm.c 	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
block             588 net/sched/sch_atm.c 		tcf_block_put(flow->block);
block             589 net/sched/sch_atm.c 		flow->block = NULL;
block             202 net/sched/sch_cake.c 	struct tcf_block *block;
block            2655 net/sched/sch_cake.c 	tcf_block_put(q->block);
block            2688 net/sched/sch_cake.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
block            2935 net/sched/sch_cake.c 	return q->block;
block             125 net/sched/sch_cbq.c 	struct tcf_block	*block;
block            1184 net/sched/sch_cbq.c 	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
block            1225 net/sched/sch_cbq.c 	tcf_block_put(q->link.block);
block            1439 net/sched/sch_cbq.c 	tcf_block_put(cl->block);
block            1464 net/sched/sch_cbq.c 			tcf_block_put(cl->block);
block            1465 net/sched/sch_cbq.c 			cl->block = NULL;
block            1614 net/sched/sch_cbq.c 	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
block            1627 net/sched/sch_cbq.c 			tcf_block_put(cl->block);
block            1725 net/sched/sch_cbq.c 	return cl->block;
block              35 net/sched/sch_drr.c 	struct tcf_block		*block;
block             183 net/sched/sch_drr.c 	return q->block;
block             423 net/sched/sch_drr.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
block             457 net/sched/sch_drr.c 	tcf_block_put(q->block);
block              48 net/sched/sch_dsmark.c 	struct tcf_block	*block;
block             196 net/sched/sch_dsmark.c 	return p->block;
block             354 net/sched/sch_dsmark.c 	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
block             420 net/sched/sch_dsmark.c 	tcf_block_put(p->block);
block              53 net/sched/sch_fq_codel.c 	struct tcf_block *block;
block             445 net/sched/sch_fq_codel.c 	tcf_block_put(q->block);
block             475 net/sched/sch_fq_codel.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
block             605 net/sched/sch_fq_codel.c 	return q->block;
block             118 net/sched/sch_hfsc.c 	struct tcf_block *block;
block            1028 net/sched/sch_hfsc.c 	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
block            1040 net/sched/sch_hfsc.c 			tcf_block_put(cl->block);
block            1085 net/sched/sch_hfsc.c 	tcf_block_put(cl->block);
block            1250 net/sched/sch_hfsc.c 	return cl->block;
block            1404 net/sched/sch_hfsc.c 	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
block            1501 net/sched/sch_hfsc.c 			tcf_block_put(cl->block);
block            1502 net/sched/sch_hfsc.c 			cl->block = NULL;
block             104 net/sched/sch_htb.c 	struct tcf_block	*block;
block             155 net/sched/sch_htb.c 	struct tcf_block	*block;
block            1009 net/sched/sch_htb.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
block            1214 net/sched/sch_htb.c 	tcf_block_put(cl->block);
block            1232 net/sched/sch_htb.c 	tcf_block_put(q->block);
block            1236 net/sched/sch_htb.c 			tcf_block_put(cl->block);
block            1237 net/sched/sch_htb.c 			cl->block = NULL;
block            1371 net/sched/sch_htb.c 		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
block            1383 net/sched/sch_htb.c 				tcf_block_put(cl->block);
block            1505 net/sched/sch_htb.c 	return cl ? cl->block : q->block;
block              18 net/sched/sch_ingress.c 	struct tcf_block *block;
block              52 net/sched/sch_ingress.c 	return q->block;
block              90 net/sched/sch_ingress.c 	return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
block              97 net/sched/sch_ingress.c 	tcf_block_put_ext(q->block, sch, &q->block_info);
block              24 net/sched/sch_multiq.c 	struct tcf_block *block;
block             165 net/sched/sch_multiq.c 	tcf_block_put(q->block);
block             247 net/sched/sch_multiq.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
block             377 net/sched/sch_multiq.c 	return q->block;
block              24 net/sched/sch_prio.c 	struct tcf_block *block;
block             172 net/sched/sch_prio.c 	tcf_block_put(q->block);
block             240 net/sched/sch_prio.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
block             400 net/sched/sch_prio.c 	return q->block;
block             181 net/sched/sch_qfq.c 	struct tcf_block	*block;
block             564 net/sched/sch_qfq.c 	return q->block;
block            1418 net/sched/sch_qfq.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
block            1476 net/sched/sch_qfq.c 	tcf_block_put(q->block);
block              55 net/sched/sch_sfb.c 	struct tcf_block *block;
block             468 net/sched/sch_sfb.c 	tcf_block_put(q->block);
block             557 net/sched/sch_sfb.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
block             676 net/sched/sch_sfb.c 	return q->block;
block             125 net/sched/sch_sfq.c 	struct tcf_block *block;
block             723 net/sched/sch_sfq.c 	tcf_block_put(q->block);
block             741 net/sched/sch_sfq.c 	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
block             852 net/sched/sch_sfq.c 	return q->block;
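The sch_*.c entries above (sch_prio, sch_sfq, sch_multiq, ...) share one lifecycle: tcf_block_get() in ->init(), a ->tcf_block() class op that hands out the stored pointer, and tcf_block_put() in ->destroy(). The sketch below mirrors those call sites; it is not code from the tree, and the demo_sched_data struct and demo_* function names are hypothetical.

/*
 * Kernel-style sketch (not from the tree) of the classifier-block
 * lifecycle shared by the sch_*.c call sites above.
 */
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

struct demo_sched_data {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
};

static int demo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct demo_sched_data *q = qdisc_priv(sch);

	/* bind this qdisc to a (possibly shared) tcf_block */
	return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static struct tcf_block *demo_tcf_block(struct Qdisc *sch, unsigned long cl,
					struct netlink_ext_ack *extack)
{
	struct demo_sched_data *q = qdisc_priv(sch);

	return q->block;	/* what cops->tcf_block() returns above */
}

static void demo_destroy(struct Qdisc *sch)
{
	struct demo_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);	/* drop the block reference */
}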
block              42 scripts/mod/sumversion.c 	uint32_t block[MD4_BLOCK_WORDS];
block             156 scripts/mod/sumversion.c 	le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(uint32_t));
block             157 scripts/mod/sumversion.c 	md4_transform(ctx->hash, ctx->block);
block             172 scripts/mod/sumversion.c 	const uint32_t avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
block             177 scripts/mod/sumversion.c 		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
block             182 scripts/mod/sumversion.c 	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
block             189 scripts/mod/sumversion.c 	while (len >= sizeof(mctx->block)) {
block             190 scripts/mod/sumversion.c 		memcpy(mctx->block, data, sizeof(mctx->block));
block             192 scripts/mod/sumversion.c 		data += sizeof(mctx->block);
block             193 scripts/mod/sumversion.c 		len -= sizeof(mctx->block);
block             196 scripts/mod/sumversion.c 	memcpy(mctx->block, data, len);
block             202 scripts/mod/sumversion.c 	char *p = (char *)mctx->block + offset;
block             209 scripts/mod/sumversion.c 		p = (char *)mctx->block;
block             214 scripts/mod/sumversion.c 	mctx->block[14] = mctx->byte_count << 3;
block             215 scripts/mod/sumversion.c 	mctx->block[15] = mctx->byte_count >> 29;
block             216 scripts/mod/sumversion.c 	le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
block             218 scripts/mod/sumversion.c 	md4_transform(mctx->hash, mctx->block);
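The scripts/mod/sumversion.c entries above show the classic fixed-size block buffering used by its MD4 implementation: bytes accumulate in a 64-byte block, a transform runs each time the block fills, and the tail is kept for the final step. The following self-contained user-space sketch reproduces only that buffering pattern; demo_transform() is a stub, not MD4, and all names are illustrative.

/*
 * Model of the 64-byte block buffering visible above: buffer input,
 * transform on each full block, keep the partial tail.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_BLOCK_BYTES 64

struct demo_ctx {
	uint64_t byte_count;
	uint8_t block[DEMO_BLOCK_BYTES];
	uint32_t hash;			/* stand-in for the real state */
};

static void demo_transform(struct demo_ctx *ctx)
{
	/* placeholder mixing step; a real hash would mix ctx->block here */
	for (int i = 0; i < DEMO_BLOCK_BYTES; i++)
		ctx->hash = ctx->hash * 31 + ctx->block[i];
}

static void demo_update(struct demo_ctx *ctx, const uint8_t *data, size_t len)
{
	size_t avail = DEMO_BLOCK_BYTES - (ctx->byte_count % DEMO_BLOCK_BYTES);

	ctx->byte_count += len;

	if (len < avail) {			/* partial block: just buffer */
		memcpy(ctx->block + (DEMO_BLOCK_BYTES - avail), data, len);
		return;
	}

	memcpy(ctx->block + (DEMO_BLOCK_BYTES - avail), data, avail);
	demo_transform(ctx);			/* first block is now full */
	data += avail;
	len -= avail;

	while (len >= DEMO_BLOCK_BYTES) {	/* whole blocks straight through */
		memcpy(ctx->block, data, DEMO_BLOCK_BYTES);
		demo_transform(ctx);
		data += DEMO_BLOCK_BYTES;
		len -= DEMO_BLOCK_BYTES;
	}

	memcpy(ctx->block, data, len);		/* keep the tail for later */
}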
block             156 sound/aoa/codecs/tas.c 	u8 block[6];
block             176 sound/aoa/codecs/tas.c 	block[0] = tmp>>20;
block             177 sound/aoa/codecs/tas.c 	block[1] = tmp>>12;
block             178 sound/aoa/codecs/tas.c 	block[2] = tmp>>4;
block             180 sound/aoa/codecs/tas.c 	block[3] = tmp>>20;
block             181 sound/aoa/codecs/tas.c 	block[4] = tmp>>12;
block             182 sound/aoa/codecs/tas.c 	block[5] = tmp>>4;
block             183 sound/aoa/codecs/tas.c 	tas_write_reg(tas, TAS_REG_VOL, 6, block);
block             188 sound/aoa/codecs/tas.c 	u8 block[9];
block             196 sound/aoa/codecs/tas.c 		block[3*i+0] = tmp>>16;
block             197 sound/aoa/codecs/tas.c 		block[3*i+1] = tmp>>8;
block             198 sound/aoa/codecs/tas.c 		block[3*i+2] = tmp;
block             200 sound/aoa/codecs/tas.c 	tas_write_reg(tas, TAS_REG_LMIX, 9, block);
block             206 sound/aoa/codecs/tas.c 		block[3*i+0] = tmp>>16;
block             207 sound/aoa/codecs/tas.c 		block[3*i+1] = tmp>>8;
block             208 sound/aoa/codecs/tas.c 		block[3*i+2] = tmp;
block             210 sound/aoa/codecs/tas.c 	tas_write_reg(tas, TAS_REG_RMIX, 9, block);
block            2335 sound/core/oss/pcm_oss.c 	else if (setup->block)
block            2923 sound/core/oss/pcm_oss.c 			    setup->block ? " block" : "",
block            2985 sound/core/oss/pcm_oss.c 				template.block = 1;
block              82 sound/drivers/opl3/opl3_midi.c 	int block = ((note / 12) & 0x07) - 1;
block             105 sound/drivers/opl3/opl3_midi.c 		((block << 2) & OPL3_BLOCKNUM_MASK);
block              82 sound/isa/gus/gus_dma.c 	struct snd_gf1_dma_block *block;
block              86 sound/isa/gus/gus_dma.c 		block = gus->gf1.dma_data_pcm;
block              87 sound/isa/gus/gus_dma.c 		if (gus->gf1.dma_data_pcm_last == block) {
block              91 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_pcm = block->next;
block              94 sound/isa/gus/gus_dma.c 		block = gus->gf1.dma_data_synth;
block              95 sound/isa/gus/gus_dma.c 		if (gus->gf1.dma_data_synth_last == block) {
block              99 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_synth = block->next;
block             102 sound/isa/gus/gus_dma.c 		block = NULL;
block             104 sound/isa/gus/gus_dma.c 	if (block) {
block             105 sound/isa/gus/gus_dma.c 		gus->gf1.dma_ack = block->ack;
block             106 sound/isa/gus/gus_dma.c 		gus->gf1.dma_private_data = block->private_data;
block             108 sound/isa/gus/gus_dma.c 	return block;
block             114 sound/isa/gus/gus_dma.c 	struct snd_gf1_dma_block *block;
block             127 sound/isa/gus/gus_dma.c 	block = snd_gf1_dma_next_block(gus);
block             129 sound/isa/gus/gus_dma.c 	snd_gf1_dma_program(gus, block->addr, block->buf_addr, block->count, (unsigned short) block->cmd);
block             130 sound/isa/gus/gus_dma.c 	kfree(block);
block             134 sound/isa/gus/gus_dma.c 		   block->addr, block->buf_addr, block->count, block->cmd);
block             157 sound/isa/gus/gus_dma.c 	struct snd_gf1_dma_block *block;
block             165 sound/isa/gus/gus_dma.c 		while ((block = gus->gf1.dma_data_pcm)) {
block             166 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_pcm = block->next;
block             167 sound/isa/gus/gus_dma.c 			kfree(block);
block             169 sound/isa/gus/gus_dma.c 		while ((block = gus->gf1.dma_data_synth)) {
block             170 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_synth = block->next;
block             171 sound/isa/gus/gus_dma.c 			kfree(block);
block             186 sound/isa/gus/gus_dma.c 	struct snd_gf1_dma_block *block;
block             188 sound/isa/gus/gus_dma.c 	block = kmalloc(sizeof(*block), atomic ? GFP_ATOMIC : GFP_KERNEL);
block             189 sound/isa/gus/gus_dma.c 	if (!block)
block             192 sound/isa/gus/gus_dma.c 	*block = *__block;
block             193 sound/isa/gus/gus_dma.c 	block->next = NULL;
block             196 sound/isa/gus/gus_dma.c 		    block->addr, (long) block->buffer, block->count,
block             197 sound/isa/gus/gus_dma.c 		    block->cmd);
block             207 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_synth_last->next = block;
block             208 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_synth_last = block;
block             211 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_synth_last = block;
block             215 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_pcm_last->next = block;
block             216 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_pcm_last = block;
block             219 sound/isa/gus/gus_dma.c 			gus->gf1.dma_data_pcm_last = block;
block             224 sound/isa/gus/gus_dma.c 		block = snd_gf1_dma_next_block(gus);
block             226 sound/isa/gus/gus_dma.c 		if (block == NULL)
block             228 sound/isa/gus/gus_dma.c 		snd_gf1_dma_program(gus, block->addr, block->buf_addr, block->count, (unsigned short) block->cmd);
block             229 sound/isa/gus/gus_dma.c 		kfree(block);
block              28 sound/isa/gus/gus_mem.c 					       struct snd_gf1_mem_block * block)
block              35 sound/isa/gus/gus_mem.c 	*nblock = *block;
block              63 sound/isa/gus/gus_mem.c int snd_gf1_mem_xfree(struct snd_gf1_mem * alloc, struct snd_gf1_mem_block * block)
block              65 sound/isa/gus/gus_mem.c 	if (block->share) {	/* ok.. shared block */
block              66 sound/isa/gus/gus_mem.c 		block->share--;
block              70 sound/isa/gus/gus_mem.c 	if (alloc->first == block) {
block              71 sound/isa/gus/gus_mem.c 		alloc->first = block->next;
block              72 sound/isa/gus/gus_mem.c 		if (block->next)
block              73 sound/isa/gus/gus_mem.c 			block->next->prev = NULL;
block              75 sound/isa/gus/gus_mem.c 		block->prev->next = block->next;
block              76 sound/isa/gus/gus_mem.c 		if (block->next)
block              77 sound/isa/gus/gus_mem.c 			block->next->prev = block->prev;
block              79 sound/isa/gus/gus_mem.c 	if (alloc->last == block) {
block              80 sound/isa/gus/gus_mem.c 		alloc->last = block->prev;
block              81 sound/isa/gus/gus_mem.c 		if (block->prev)
block              82 sound/isa/gus/gus_mem.c 			block->prev->next = NULL;
block              84 sound/isa/gus/gus_mem.c 		block->next->prev = block->prev;
block              85 sound/isa/gus/gus_mem.c 		if (block->prev)
block              86 sound/isa/gus/gus_mem.c 			block->prev->next = block->next;
block              88 sound/isa/gus/gus_mem.c 	kfree(block->name);
block              89 sound/isa/gus/gus_mem.c 	kfree(block);
block              96 sound/isa/gus/gus_mem.c 	struct snd_gf1_mem_block *block;
block              98 sound/isa/gus/gus_mem.c 	for (block = alloc->first; block; block = block->next) {
block              99 sound/isa/gus/gus_mem.c 		if (block->ptr == address) {
block             100 sound/isa/gus/gus_mem.c 			return block;
block             109 sound/isa/gus/gus_mem.c 	struct snd_gf1_mem_block *block;
block             114 sound/isa/gus/gus_mem.c 	for (block = alloc->first; block; block = block->next)
block             115 sound/isa/gus/gus_mem.c 		if (!memcmp(share_id, block->share_id,
block             116 sound/isa/gus/gus_mem.c 				sizeof(block->share_id)))
block             117 sound/isa/gus/gus_mem.c 			return block;
block             122 sound/isa/gus/gus_mem.c 			    struct snd_gf1_mem_block * block,
block             133 sound/isa/gus/gus_mem.c 	block->flags = w_16 ? SNDRV_GF1_MEM_BLOCK_16BIT : 0;
block             134 sound/isa/gus/gus_mem.c 	block->owner = SNDRV_GF1_MEM_OWNER_DRIVER;
block             135 sound/isa/gus/gus_mem.c 	block->share = 0;
block             136 sound/isa/gus/gus_mem.c 	block->share_id[0] = block->share_id[1] =
block             137 sound/isa/gus/gus_mem.c 	block->share_id[2] = block->share_id[3] = 0;
block             138 sound/isa/gus/gus_mem.c 	block->name = NULL;
block             139 sound/isa/gus/gus_mem.c 	block->prev = block->next = NULL;
block             157 sound/isa/gus/gus_mem.c 			block->ptr = ptr1;
block             158 sound/isa/gus/gus_mem.c 			block->size = size;
block             165 sound/isa/gus/gus_mem.c 			block->ptr = info[idx].address;
block             166 sound/isa/gus/gus_mem.c 			block->size = size;
block             177 sound/isa/gus/gus_mem.c 	struct snd_gf1_mem_block block, *nblock;
block             194 sound/isa/gus/gus_mem.c 	if (snd_gf1_mem_find(alloc, &block, size, w_16, align) < 0) {
block             199 sound/isa/gus/gus_mem.c 		memcpy(&block.share_id, share_id, sizeof(block.share_id));
block             200 sound/isa/gus/gus_mem.c 	block.owner = owner;
block             201 sound/isa/gus/gus_mem.c 	block.name = kstrdup(name, GFP_KERNEL);
block             202 sound/isa/gus/gus_mem.c 	nblock = snd_gf1_mem_xalloc(alloc, &block);
block             210 sound/isa/gus/gus_mem.c 	struct snd_gf1_mem_block *block;
block             213 sound/isa/gus/gus_mem.c 	if ((block = snd_gf1_mem_look(alloc, address)) != NULL) {
block             214 sound/isa/gus/gus_mem.c 		result = snd_gf1_mem_xfree(alloc, block);
block             225 sound/isa/gus/gus_mem.c 	struct snd_gf1_mem_block block;
block             233 sound/isa/gus/gus_mem.c 	memset(&block, 0, sizeof(block));
block             234 sound/isa/gus/gus_mem.c 	block.owner = SNDRV_GF1_MEM_OWNER_DRIVER;
block             236 sound/isa/gus/gus_mem.c 		block.ptr = 0;
block             237 sound/isa/gus/gus_mem.c 		block.size = 1024;
block             238 sound/isa/gus/gus_mem.c 		block.name = kstrdup("InterWave LFOs", GFP_KERNEL);
block             239 sound/isa/gus/gus_mem.c 		if (snd_gf1_mem_xalloc(alloc, &block) == NULL)
block             242 sound/isa/gus/gus_mem.c 	block.ptr = gus->gf1.default_voice_address;
block             243 sound/isa/gus/gus_mem.c 	block.size = 4;
block             244 sound/isa/gus/gus_mem.c 	block.name = kstrdup("Voice default (NULL's)", GFP_KERNEL);
block             245 sound/isa/gus/gus_mem.c 	if (snd_gf1_mem_xalloc(alloc, &block) == NULL)
block             256 sound/isa/gus/gus_mem.c 	struct snd_gf1_mem_block *block, *nblock;
block             259 sound/isa/gus/gus_mem.c 	block = alloc->first;
block             260 sound/isa/gus/gus_mem.c 	while (block) {
block             261 sound/isa/gus/gus_mem.c 		nblock = block->next;
block             262 sound/isa/gus/gus_mem.c 		snd_gf1_mem_xfree(alloc, block);
block             263 sound/isa/gus/gus_mem.c 		block = nblock;
block             274 sound/isa/gus/gus_mem.c 	struct snd_gf1_mem_block *block;
block             292 sound/isa/gus/gus_mem.c 	for (block = alloc->first, i = 0; block; block = block->next, i++) {
block             293 sound/isa/gus/gus_mem.c 		used += block->size;
block             294 sound/isa/gus/gus_mem.c 		snd_iprintf(buffer, "Block %i onboard 0x%x size %i (0x%x):\n", i, block->ptr, block->size, block->size);
block             295 sound/isa/gus/gus_mem.c 		if (block->share ||
block             296 sound/isa/gus/gus_mem.c 		    block->share_id[0] || block->share_id[1] ||
block             297 sound/isa/gus/gus_mem.c 		    block->share_id[2] || block->share_id[3])
block             299 sound/isa/gus/gus_mem.c 				block->share,
block             300 sound/isa/gus/gus_mem.c 				block->share_id[0], block->share_id[1],
block             301 sound/isa/gus/gus_mem.c 				block->share_id[2], block->share_id[3]);
block             303 sound/isa/gus/gus_mem.c 		block->flags & SNDRV_GF1_MEM_BLOCK_16BIT ? " 16-bit" : "");
block             305 sound/isa/gus/gus_mem.c 		switch (block->owner) {
block             307 sound/isa/gus/gus_mem.c 			snd_iprintf(buffer, "driver - %s\n", block->name);
block              64 sound/isa/gus/gus_pcm.c 	struct snd_gf1_dma_block block;
block              74 sound/isa/gus/gus_pcm.c 	memset(&block, 0, sizeof(block));
block              75 sound/isa/gus/gus_pcm.c 	block.cmd = SNDRV_GF1_DMA_IRQ;
block              77 sound/isa/gus/gus_pcm.c 		block.cmd |= SNDRV_GF1_DMA_UNSIGNED;
block              79 sound/isa/gus/gus_pcm.c 		block.cmd |= SNDRV_GF1_DMA_16BIT;
block              80 sound/isa/gus/gus_pcm.c 	block.addr = addr & ~31;
block              81 sound/isa/gus/gus_pcm.c 	block.buffer = runtime->dma_area + offset;
block              82 sound/isa/gus/gus_pcm.c 	block.buf_addr = runtime->dma_addr + offset;
block              83 sound/isa/gus/gus_pcm.c 	block.count = count;
block              84 sound/isa/gus/gus_pcm.c 	block.private_data = pcmp;
block              85 sound/isa/gus/gus_pcm.c 	block.ack = snd_gf1_pcm_block_change_ack;
block              86 sound/isa/gus/gus_pcm.c 	if (!snd_gf1_dma_transfer_block(pcmp->gus, &block, 0, 0))
block             431 sound/isa/gus/gus_pcm.c 		struct snd_gf1_mem_block *block;
block             436 sound/isa/gus/gus_pcm.c 		if ((block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc,
block             442 sound/isa/gus/gus_pcm.c 		pcmp->memory = block->ptr;
block             165 sound/isa/sb/emu8000_patch.c 	sp->block = snd_util_mem_alloc(hdr, truesize * 2);
block             166 sound/isa/sb/emu8000_patch.c 	if (sp->block == NULL) {
block             187 sound/isa/sb/emu8000_patch.c 	dram_offset = EMU8000_DRAM_OFFSET + (sp->block->offset >> 1);
block             205 sound/isa/sb/emu8000_patch.c 	if (! sp->block->offset) {
block             276 sound/isa/sb/emu8000_patch.c 	if (sp->block) {
block             277 sound/isa/sb/emu8000_patch.c 		snd_util_mem_free(hdr, sp->block);
block             278 sound/isa/sb/emu8000_patch.c 		sp->block = NULL;
block              44 sound/isa/sb/emu8000_pcm.c 	struct snd_util_memblk *block;
block             567 sound/isa/sb/emu8000_pcm.c 	if (rec->block) {
block             569 sound/isa/sb/emu8000_pcm.c 		snd_util_mem_free(rec->emu->memhdr, rec->block);
block             570 sound/isa/sb/emu8000_pcm.c 		rec->block = NULL;
block             574 sound/isa/sb/emu8000_pcm.c 	rec->block = snd_util_mem_alloc(rec->emu->memhdr, rec->allocated_bytes);
block             575 sound/isa/sb/emu8000_pcm.c 	if (! rec->block)
block             577 sound/isa/sb/emu8000_pcm.c 	rec->offset = EMU8000_DRAM_OFFSET + (rec->block->offset >> 1); /* in word */
block             591 sound/isa/sb/emu8000_pcm.c 	if (rec->block) {
block             597 sound/isa/sb/emu8000_pcm.c 		snd_util_mem_free(rec->emu->memhdr, rec->block);
block             598 sound/isa/sb/emu8000_pcm.c 		rec->block = NULL;
block             241 sound/pci/bt87x.c 				u32 block = i * 16 / periods;
block             243 sound/pci/bt87x.c 				cmd |= block << RISC_SET_STATUS_SHIFT;
block             244 sound/pci/bt87x.c 				cmd |= (~block & 0xf) << RISC_RESET_STATUS_SHIFT;
block              32 sound/pci/ctxfi/ctvmem.c 	struct ct_vm_block *block = NULL, *entry;
block              55 sound/pci/ctxfi/ctvmem.c 		block = entry;
block              59 sound/pci/ctxfi/ctvmem.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block              60 sound/pci/ctxfi/ctvmem.c 	if (!block)
block              63 sound/pci/ctxfi/ctvmem.c 	block->addr = entry->addr;
block              64 sound/pci/ctxfi/ctvmem.c 	block->size = size;
block              65 sound/pci/ctxfi/ctvmem.c 	list_add(&block->list, &vm->used);
block              72 sound/pci/ctxfi/ctvmem.c 	return block;
block              75 sound/pci/ctxfi/ctvmem.c static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
block              80 sound/pci/ctxfi/ctvmem.c 	block->size = CT_PAGE_ALIGN(block->size);
block              83 sound/pci/ctxfi/ctvmem.c 	list_del(&block->list);
block              84 sound/pci/ctxfi/ctvmem.c 	vm->size += block->size;
block              88 sound/pci/ctxfi/ctvmem.c 		if (entry->addr >= (block->addr + block->size))
block              92 sound/pci/ctxfi/ctvmem.c 		list_add_tail(&block->list, &vm->unused);
block              93 sound/pci/ctxfi/ctvmem.c 		entry = block;
block              95 sound/pci/ctxfi/ctvmem.c 		if ((block->addr + block->size) == entry->addr) {
block              96 sound/pci/ctxfi/ctvmem.c 			entry->addr = block->addr;
block              97 sound/pci/ctxfi/ctvmem.c 			entry->size += block->size;
block              98 sound/pci/ctxfi/ctvmem.c 			kfree(block);
block             100 sound/pci/ctxfi/ctvmem.c 			__list_add(&block->list, pos->prev, pos);
block             101 sound/pci/ctxfi/ctvmem.c 			entry = block;
block             126 sound/pci/ctxfi/ctvmem.c 	struct ct_vm_block *block;
block             132 sound/pci/ctxfi/ctvmem.c 	block = get_vm_block(vm, size, atc);
block             133 sound/pci/ctxfi/ctvmem.c 	if (block == NULL) {
block             140 sound/pci/ctxfi/ctvmem.c 	pte_start = (block->addr >> CT_PAGE_SHIFT);
block             141 sound/pci/ctxfi/ctvmem.c 	pages = block->size >> CT_PAGE_SHIFT;
block             148 sound/pci/ctxfi/ctvmem.c 	block->size = size;
block             149 sound/pci/ctxfi/ctvmem.c 	return block;
block             152 sound/pci/ctxfi/ctvmem.c static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
block             155 sound/pci/ctxfi/ctvmem.c 	put_vm_block(vm, block);
block             172 sound/pci/ctxfi/ctvmem.c 	struct ct_vm_block *block;
block             202 sound/pci/ctxfi/ctvmem.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block             203 sound/pci/ctxfi/ctvmem.c 	if (NULL != block) {
block             204 sound/pci/ctxfi/ctvmem.c 		block->addr = 0;
block             205 sound/pci/ctxfi/ctvmem.c 		block->size = vm->size;
block             206 sound/pci/ctxfi/ctvmem.c 		list_add(&block->list, &vm->unused);
block              53 sound/pci/ctxfi/ctvmem.h 	void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
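The ctvmem.c entries above outline a small range allocator: get_vm_block() carves a first-fit range off an address-sorted "unused" list, and put_vm_block() returns the range and coalesces it with the neighbouring free block. Below is a simplified, self-contained user-space model of that carve/coalesce pattern under those assumptions; it manages abstract [addr, addr+size) ranges only, merges forward only for brevity, and none of the names are driver symbols.

/*
 * Simplified first-fit carve / forward-coalescing free, modelled on the
 * ctvmem.c call sites above.
 */
#include <stdlib.h>

struct vm_blk {
	unsigned long addr;
	unsigned long size;
	struct vm_blk *next;	/* free list, sorted by addr */
};

/* First fit: carve `size` bytes off the front of the first big-enough block. */
static struct vm_blk *vm_get(struct vm_blk **free_list, unsigned long size)
{
	struct vm_blk **pp, *e, *blk;

	for (pp = free_list; (e = *pp) != NULL; pp = &e->next) {
		if (e->size < size)
			continue;
		blk = malloc(sizeof(*blk));
		if (!blk)
			return NULL;
		blk->addr = e->addr;
		blk->size = size;
		blk->next = NULL;
		e->addr += size;		/* shrink the free block */
		e->size -= size;
		if (e->size == 0) {		/* consumed it entirely */
			*pp = e->next;
			free(e);
		}
		return blk;
	}
	return NULL;				/* no block big enough */
}

/* Return a block to the sorted free list, merging with the next neighbour. */
static void vm_put(struct vm_blk **free_list, struct vm_blk *blk)
{
	struct vm_blk **pp, *e;

	for (pp = free_list; (e = *pp) != NULL; pp = &e->next)
		if (e->addr >= blk->addr + blk->size)
			break;

	if (e && blk->addr + blk->size == e->addr) {
		e->addr = blk->addr;		/* coalesce forward */
		e->size += blk->size;
		free(blk);
	} else {
		blk->next = e;			/* keep the list address-sorted */
		*pp = blk;
	}
}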
block             141 sound/pci/emu10k1/emu10k1_callback.c 	if (vp->block) {
block             143 sound/pci/emu10k1/emu10k1_callback.c 		emem = (struct snd_emu10k1_memblk *)vp->block;
block             322 sound/pci/emu10k1/emu10k1_callback.c 	emem = (struct snd_emu10k1_memblk *)vp->block;
block             471 sound/pci/emu10k1/emu10k1_callback.c 	emem = (struct snd_emu10k1_memblk *)vp->block;
block              78 sound/pci/emu10k1/emu10k1_patch.c 	sp->block = snd_emu10k1_synth_alloc(emu, blocksize);
block              79 sound/pci/emu10k1/emu10k1_patch.c 	if (sp->block == NULL) {
block              95 sound/pci/emu10k1/emu10k1_patch.c 	snd_emu10k1_synth_bzero(emu, sp->block, offset, size);
block             104 sound/pci/emu10k1/emu10k1_patch.c 	if (snd_emu10k1_synth_copy_from_user(emu, sp->block, offset, data, size)) {
block             105 sound/pci/emu10k1/emu10k1_patch.c 		snd_emu10k1_synth_free(emu, sp->block);
block             106 sound/pci/emu10k1/emu10k1_patch.c 		sp->block = NULL;
block             118 sound/pci/emu10k1/emu10k1_patch.c 			unsigned short *wblock = (unsigned short*)block;
block             129 sound/pci/emu10k1/emu10k1_patch.c 				block[offset + i] = block[offset - i -1];
block             151 sound/pci/emu10k1/emu10k1_patch.c 	if (snd_emu10k1_synth_copy_from_user(emu, sp->block, offset, data, size)) {
block             152 sound/pci/emu10k1/emu10k1_patch.c 		snd_emu10k1_synth_free(emu, sp->block);
block             153 sound/pci/emu10k1/emu10k1_patch.c 		sp->block = NULL;
block             160 sound/pci/emu10k1/emu10k1_patch.c 		snd_emu10k1_synth_bzero(emu, sp->block, offset, blocksize - offset);
block             174 sound/pci/emu10k1/emu10k1_patch.c 			unsigned short *wblock = (unsigned short*)block;
block             179 sound/pci/emu10k1/emu10k1_patch.c 				block[i] ^= 0x80;
block             209 sound/pci/emu10k1/emu10k1_patch.c 	if (sp->block) {
block             210 sound/pci/emu10k1/emu10k1_patch.c 		snd_emu10k1_synth_free(emu, sp->block);
block             211 sound/pci/emu10k1/emu10k1_patch.c 		sp->block = NULL;
block             210 sound/pci/emu10k1/memory.c 	list_for_each(p, &emu->memhdr->block) {
block             457 sound/pci/emu10k1/memory.c 	if ((p = blk->mem.list.prev) != &hdr->block) {
block             463 sound/pci/emu10k1/memory.c 	if ((p = blk->mem.list.next) != &hdr->block) {
block             173 sound/pci/ice1712/prodigy_hifi.c 	unsigned int block;
block             181 sound/pci/ice1712/prodigy_hifi.c 	block = (reg << 9) | (data & 0x1ff);
block             182 sound/pci/ice1712/prodigy_hifi.c 	wm8766_spi_send_word(ice, block); /* REGISTER ADDRESS */
block             213 sound/pci/ice1712/prodigy_hifi.c 	unsigned int block;
block             219 sound/pci/ice1712/prodigy_hifi.c 	block =  ((AK4396_ADDR & 0x03) << 14) | (1 << 13) |
block             221 sound/pci/ice1712/prodigy_hifi.c 	ak4396_send_word(ice, block); /* REGISTER ADDRESS */
block             133 sound/pci/trident/trident_memory.c 	list_for_each(p, &hdr->block) {
block             200 sound/ppc/tumbler.c 	unsigned char block[6];
block             223 sound/ppc/tumbler.c 	block[0] = (left_vol >> 16) & 0xff;
block             224 sound/ppc/tumbler.c 	block[1] = (left_vol >> 8)  & 0xff;
block             225 sound/ppc/tumbler.c 	block[2] = (left_vol >> 0)  & 0xff;
block             227 sound/ppc/tumbler.c 	block[3] = (right_vol >> 16) & 0xff;
block             228 sound/ppc/tumbler.c 	block[4] = (right_vol >> 8)  & 0xff;
block             229 sound/ppc/tumbler.c 	block[5] = (right_vol >> 0)  & 0xff;
block             232 sound/ppc/tumbler.c 					   block) < 0) {
block             487 sound/ppc/tumbler.c 	unsigned char block[4];
block             499 sound/ppc/tumbler.c 		block[i] = (vol >> ((info->bytes - i - 1) * 8)) & 0xff;
block             501 sound/ppc/tumbler.c 					   info->bytes, block) < 0) {
block             624 sound/ppc/tumbler.c 	unsigned char block[9];
block             636 sound/ppc/tumbler.c 			block[i * 3 + j] = (vol >> ((2 - j) * 8)) & 0xff;
block             639 sound/ppc/tumbler.c 					   9, block) < 0) {
block             390 sound/soc/codecs/wm8994.c static void wm8994_set_retune_mobile(struct snd_soc_component *component, int block)
block             395 sound/soc/codecs/wm8994.c 	int base = wm8994_retune_mobile_base[block];
block             401 sound/soc/codecs/wm8994.c 	switch (block) {
block             415 sound/soc/codecs/wm8994.c 	cfg = wm8994->retune_mobile_cfg[block];
block             430 sound/soc/codecs/wm8994.c 		block,
block             467 sound/soc/codecs/wm8994.c 	int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
block             470 sound/soc/codecs/wm8994.c 	if (block < 0)
block             471 sound/soc/codecs/wm8994.c 		return block;
block             476 sound/soc/codecs/wm8994.c 	wm8994->retune_mobile_cfg[block] = value;
block             478 sound/soc/codecs/wm8994.c 	wm8994_set_retune_mobile(component, block);
block             488 sound/soc/codecs/wm8994.c 	int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
block             490 sound/soc/codecs/wm8994.c 	if (block < 0)
block             491 sound/soc/codecs/wm8994.c 		return block;
block             493 sound/soc/codecs/wm8994.c 	ucontrol->value.enumerated.item[0] = wm8994->retune_mobile_cfg[block];
block             334 sound/soc/codecs/wm8996.c static void wm8996_set_retune_mobile(struct snd_soc_component *component, int block)
block             343 sound/soc/codecs/wm8996.c 	switch (block) {
block             366 sound/soc/codecs/wm8996.c 	cfg = wm8996->retune_mobile_cfg[block];
block             381 sound/soc/codecs/wm8996.c 		block,
block             415 sound/soc/codecs/wm8996.c 	int block = wm8996_get_retune_mobile_block(kcontrol->id.name);
block             418 sound/soc/codecs/wm8996.c 	if (block < 0)
block             419 sound/soc/codecs/wm8996.c 		return block;
block             424 sound/soc/codecs/wm8996.c 	wm8996->retune_mobile_cfg[block] = value;
block             426 sound/soc/codecs/wm8996.c 	wm8996_set_retune_mobile(component, block);
block             436 sound/soc/codecs/wm8996.c 	int block = wm8996_get_retune_mobile_block(kcontrol->id.name);
block             438 sound/soc/codecs/wm8996.c 	if (block < 0)
block             439 sound/soc/codecs/wm8996.c 		return block;
block             440 sound/soc/codecs/wm8996.c 	ucontrol->value.enumerated.item[0] = wm8996->retune_mobile_cfg[block];
block              24 sound/soc/intel/atom/sst-atom-controls.c 					 u8 ipc_msg, u8 block,
block              32 sound/soc/intel/atom/sst-atom-controls.c 	byte_data->block = block;
block              48 sound/soc/intel/atom/sst-atom-controls.c 				 u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id,
block              54 sound/soc/intel/atom/sst-atom-controls.c 				block, task_id, pipe_id, len, cmd_data);
block              66 sound/soc/intel/atom/sst-atom-controls.c 				 u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id,
block              72 sound/soc/intel/atom/sst-atom-controls.c 	ret = sst_fill_and_send_cmd_unlocked(drv, ipc_msg, block,
block             512 sound/soc/intel/atom/sst-mfld-dsp.h 	u8 block;
block             511 sound/soc/intel/atom/sst/sst.c 	struct sst_block *block;
block             535 sound/soc/intel/atom/sst/sst.c 	block = sst_create_block(ctx, 0, FW_DWNL_ID);
block             536 sound/soc/intel/atom/sst/sst.c 	if (block == NULL)
block             542 sound/soc/intel/atom/sst/sst.c 	ret = sst_wait_timeout(ctx, block);
block             567 sound/soc/intel/atom/sst/sst.c 	sst_free_block(ctx, block);
block             461 sound/soc/intel/atom/sst/sst.h 				struct sst_block *block);
block             463 sound/soc/intel/atom/sst/sst.h 			struct sst_block *block);
block             478 sound/soc/intel/atom/sst/sst.h 		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
block              67 sound/soc/intel/atom/sst/sst_ipc.c 	struct sst_block *block = NULL;
block              72 sound/soc/intel/atom/sst/sst_ipc.c 	list_for_each_entry(block, &ctx->block_list, node) {
block              73 sound/soc/intel/atom/sst/sst_ipc.c 		dev_dbg(ctx->dev, "Block ipc %d, drv_id %d\n", block->msg_id,
block              74 sound/soc/intel/atom/sst/sst_ipc.c 							block->drv_id);
block              75 sound/soc/intel/atom/sst/sst_ipc.c 		if (block->msg_id == ipc && block->drv_id == drv_id) {
block              77 sound/soc/intel/atom/sst/sst_ipc.c 			block->ret_code = result;
block              78 sound/soc/intel/atom/sst/sst_ipc.c 			block->data = data;
block              79 sound/soc/intel/atom/sst/sst_ipc.c 			block->size = size;
block              80 sound/soc/intel/atom/sst/sst_ipc.c 			block->condition = true;
block              95 sound/soc/intel/atom/sst/sst_ipc.c 	struct sst_block *block = NULL, *__block;
block              99 sound/soc/intel/atom/sst/sst_ipc.c 	list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
block             100 sound/soc/intel/atom/sst/sst_ipc.c 		if (block == freed) {
block             175 sound/soc/intel/atom/sst/sst_loader.c 	struct fw_block_info *block;
block             185 sound/soc/intel/atom/sst/sst_loader.c 	block = (void *)module + sizeof(*module);
block             188 sound/soc/intel/atom/sst/sst_loader.c 		if (block->size <= 0) {
block             192 sound/soc/intel/atom/sst/sst_loader.c 		switch (block->type) {
block             203 sound/soc/intel/atom/sst/sst_loader.c 			block = (void *)block + sizeof(*block) + block->size;
block             207 sound/soc/intel/atom/sst/sst_loader.c 					block->type, count);
block             212 sound/soc/intel/atom/sst/sst_loader.c 				ram_iomem + block->ram_offset,
block             213 sound/soc/intel/atom/sst/sst_loader.c 				(void *)block + sizeof(*block), block->size, 1);
block             217 sound/soc/intel/atom/sst/sst_loader.c 		block = (void *)block + sizeof(*block) + block->size;
block             395 sound/soc/intel/atom/sst/sst_loader.c 	struct sst_block *block;
block             410 sound/soc/intel/atom/sst/sst_loader.c 	block = sst_create_block(sst_drv_ctx, 0, FW_DWNL_ID);
block             411 sound/soc/intel/atom/sst/sst_loader.c 	if (block == NULL)
block             434 sound/soc/intel/atom/sst/sst_loader.c 	ret_val = sst_wait_timeout(sst_drv_ctx, block);
block             446 sound/soc/intel/atom/sst/sst_loader.c 	sst_free_block(sst_drv_ctx, block);
block              83 sound/soc/intel/atom/sst/sst_pvt.c 				struct sst_block *block)
block              88 sound/soc/intel/atom/sst/sst_pvt.c 				block->condition)) {
block              90 sound/soc/intel/atom/sst/sst_pvt.c 		if (block->ret_code < 0) {
block              92 sound/soc/intel/atom/sst/sst_pvt.c 				"stream failed %d\n", block->ret_code);
block             115 sound/soc/intel/atom/sst/sst_pvt.c int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx, struct sst_block *block)
block             126 sound/soc/intel/atom/sst/sst_pvt.c 		block->condition, block->msg_id, block->drv_id);
block             128 sound/soc/intel/atom/sst/sst_pvt.c 				block->condition,
block             132 sound/soc/intel/atom/sst/sst_pvt.c 				block->condition);
block             134 sound/soc/intel/atom/sst/sst_pvt.c 				block->ret_code);
block             135 sound/soc/intel/atom/sst/sst_pvt.c 		retval = -block->ret_code;
block             137 sound/soc/intel/atom/sst/sst_pvt.c 		block->on = false;
block             140 sound/soc/intel/atom/sst/sst_pvt.c 			block->condition, block->msg_id, sst_drv_ctx->sst_state);
block             188 sound/soc/intel/atom/sst/sst_pvt.c 		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
block             196 sound/soc/intel/atom/sst/sst_pvt.c 	*block = sst_create_block(sst_drv_ctx, msg_id, drv_id);
block             197 sound/soc/intel/atom/sst/sst_pvt.c 	if (*block == NULL) {
block             228 sound/soc/intel/atom/sst/sst_pvt.c 	struct sst_block *block;
block             237 sound/soc/intel/atom/sst/sst_pvt.c 				&msg, large, sst, &block, ipc_msg, pvt_id);
block             273 sound/soc/intel/atom/sst/sst_pvt.c 		ret = sst_wait_timeout(sst, block);
block             277 sound/soc/intel/atom/sst/sst_pvt.c 		if (data && block->data) {
block             278 sound/soc/intel/atom/sst/sst_pvt.c 			*data = kmemdup(block->data, block->size, GFP_KERNEL);
block             287 sound/soc/intel/atom/sst/sst_pvt.c 		sst_free_block(sst, block);
block             176 sound/soc/intel/atom/sst/sst_stream.c 	struct sst_block *block = NULL;
block             180 sound/soc/intel/atom/sst/sst_stream.c 		bytes->type, bytes->ipc_msg, bytes->block, bytes->task_id,
block             189 sound/soc/intel/atom/sst/sst_stream.c 	msg->mrfld_header.p.header_high.part.res_rqd = bytes->block;
block             194 sound/soc/intel/atom/sst/sst_stream.c 	if (bytes->block) {
block             195 sound/soc/intel/atom/sst/sst_stream.c 		block = sst_create_block(sst_drv_ctx, bytes->ipc_msg, pvt_id);
block             196 sound/soc/intel/atom/sst/sst_stream.c 		if (block == NULL) {
block             207 sound/soc/intel/atom/sst/sst_stream.c 	if (bytes->block) {
block             208 sound/soc/intel/atom/sst/sst_stream.c 		ret = sst_wait_timeout(sst_drv_ctx, block);
block             211 sound/soc/intel/atom/sst/sst_stream.c 			sst_free_block(sst_drv_ctx, block);
block             220 sound/soc/intel/atom/sst/sst_stream.c 		if (bytes->block) {
block             221 sound/soc/intel/atom/sst/sst_stream.c 			unsigned char *r = block->data;
block             228 sound/soc/intel/atom/sst/sst_stream.c 	if (bytes->block)
block             229 sound/soc/intel/atom/sst/sst_stream.c 		sst_free_block(sst_drv_ctx, block);
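The Atom SST hits above (sst_loader.c, sst_pvt.c, sst_stream.c) all follow one lifecycle: sst_create_block() allocates a wait object keyed by a message id and driver id, the caller posts an IPC message and sleeps in sst_wait_timeout() until block->condition is set by the reply path (optionally leaving a payload behind in block->data), and sst_free_block() releases the object. The userspace sketch below only models that shape with pthreads; the demo_* names, the mutex/condvar plumbing and the exact return conventions are illustrative assumptions, not the driver's implementation, which uses kernel wait queues.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

/* Illustrative stand-in for struct sst_block; not the driver's layout. */
struct demo_block {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	bool condition;		/* set by the reply handler */
	int ret_code;		/* status carried back by the reply */
	int msg_id, drv_id;	/* key used to match a reply to its waiter */
};

static struct demo_block *demo_create_block(int msg_id, int drv_id)
{
	struct demo_block *block = calloc(1, sizeof(*block));

	if (!block)
		return NULL;
	pthread_mutex_init(&block->lock, NULL);
	pthread_cond_init(&block->wake, NULL);
	block->msg_id = msg_id;
	block->drv_id = drv_id;
	return block;
}

/* Rough analogue of sst_wait_timeout(): 0 on success, negative on error. */
static int demo_wait_timeout(struct demo_block *block, unsigned int timeout_ms)
{
	struct timespec ts;
	int err = 0, ret;
	bool done;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&block->lock);
	while (!block->condition && err == 0)
		err = pthread_cond_timedwait(&block->wake, &block->lock, &ts);
	done = block->condition;
	ret = block->ret_code;
	pthread_mutex_unlock(&block->lock);

	if (!done)
		return -ETIMEDOUT;	/* no reply arrived before the deadline */
	return ret < 0 ? ret : 0;
}

/* Called from the (simulated) IPC reply path for the matching block. */
static void demo_complete_block(struct demo_block *block, int ret_code)
{
	pthread_mutex_lock(&block->lock);
	block->ret_code = ret_code;
	block->condition = true;
	pthread_cond_signal(&block->wake);
	pthread_mutex_unlock(&block->lock);
}

/* Counterpart of sst_free_block() in this toy model. */
static void demo_free_block(struct demo_block *block)
{
	pthread_mutex_destroy(&block->lock);
	pthread_cond_destroy(&block->wake);
	free(block);
}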
block              60 sound/soc/intel/baytrail/sst-baytrail-dsp.c 	struct dma_block_info *block;
block              73 sound/soc/intel/baytrail/sst-baytrail-dsp.c 	block = (void *)module + sizeof(*module);
block              77 sound/soc/intel/baytrail/sst-baytrail-dsp.c 		if (block->size <= 0) {
block              82 sound/soc/intel/baytrail/sst-baytrail-dsp.c 		switch (block->type) {
block              84 sound/soc/intel/baytrail/sst-baytrail-dsp.c 			mod->offset = block->ram_offset +
block              89 sound/soc/intel/baytrail/sst-baytrail-dsp.c 			mod->offset = block->ram_offset +
block              94 sound/soc/intel/baytrail/sst-baytrail-dsp.c 			mod->offset = block->ram_offset +
block             100 sound/soc/intel/baytrail/sst-baytrail-dsp.c 				block->type, count);
block             104 sound/soc/intel/baytrail/sst-baytrail-dsp.c 		mod->size = block->size;
block             105 sound/soc/intel/baytrail/sst-baytrail-dsp.c 		mod->data = (void *)block + sizeof(*block);
block             109 sound/soc/intel/baytrail/sst-baytrail-dsp.c 		block = (void *)block + sizeof(*block) + block->size;
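The Baytrail loader lines show the firmware image being walked as a sequence of dma_block_info headers, each immediately followed by block->size bytes of payload, with the next header found by stepping past both. A minimal sketch of that walk follows; the demo_block_info layout is a made-up stand-in, not the real firmware format.

#include <stdio.h>

struct demo_block_info {
	unsigned int type;	/* IRAM/DRAM/cache selector in the driver */
	unsigned int size;	/* payload bytes that follow this header */
	unsigned int ram_offset;/* destination offset inside that memory */
};

static void demo_walk_blocks(const void *first, unsigned int count)
{
	const struct demo_block_info *block = first;
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* payload for this block starts right after its header */
		const unsigned char *data =
			(const unsigned char *)block + sizeof(*block);

		printf("block %u: type %u, %u bytes -> ram offset 0x%x\n",
		       i, block->type, block->size, block->ram_offset);
		(void)data;	/* a real loader would copy `size` bytes from here */

		/* the next header follows that payload */
		block = (const struct demo_block_info *)(data + block->size);
	}
}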
block             222 sound/soc/intel/common/sst-dsp-priv.h 	int (*enable)(struct sst_mem_block *block);
block             223 sound/soc/intel/common/sst-dsp-priv.h 	int (*disable)(struct sst_mem_block *block);
block             123 sound/soc/intel/common/sst-firmware.c 	struct sst_mem_block *block, *tmp;
block             127 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry(block, block_list, module_list) {
block             129 sound/soc/intel/common/sst-firmware.c 		if (block->ops && block->ops->disable) {
block             130 sound/soc/intel/common/sst-firmware.c 			err = block->ops->disable(block);
block             134 sound/soc/intel/common/sst-firmware.c 					block->type, block->index);
block             139 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry_safe(block, tmp, block_list, module_list) {
block             140 sound/soc/intel/common/sst-firmware.c 		list_del(&block->module_list);
block             141 sound/soc/intel/common/sst-firmware.c 		list_move(&block->list, &dsp->free_block_list);
block             143 sound/soc/intel/common/sst-firmware.c 			block->type, block->index, block->offset);
block             151 sound/soc/intel/common/sst-firmware.c 	struct sst_mem_block *block;
block             155 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry(block, block_list, module_list) {
block             157 sound/soc/intel/common/sst-firmware.c 		if (block->ops && block->ops->enable && !block->users) {
block             158 sound/soc/intel/common/sst-firmware.c 			ret = block->ops->enable(block);
block             162 sound/soc/intel/common/sst-firmware.c 					block->type, block->index);
block             170 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry(block, block_list, module_list) {
block             171 sound/soc/intel/common/sst-firmware.c 		if (block->ops && block->ops->disable)
block             172 sound/soc/intel/common/sst-firmware.c 			block->ops->disable(block);
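sst-dsp-priv.h declares per-block enable/disable callbacks, and the two sst-firmware.c walks above use them: one disables each block and moves it back to dsp->free_block_list, the other enables every block a module needs (skipping blocks that already have users) and, on failure, runs a second walk that disables what was touched. The sketch below condenses the enable-with-unwind half over a plain array instead of the kernel's list_for_each_entry(); the demo_* names are illustrative, and the users check and list bookkeeping from the excerpt are deliberately left out.

#include <stdio.h>

struct demo_mem_block;

/* Per-block callbacks, mirroring the ops declared in sst-dsp-priv.h. */
struct demo_block_ops {
	int (*enable)(struct demo_mem_block *block);
	int (*disable)(struct demo_mem_block *block);
};

struct demo_mem_block {
	int type, index;
	const struct demo_block_ops *ops;
};

/* Power on every block a module needs; unwind if any enable fails. */
static int demo_block_list_prepare(struct demo_mem_block *blocks[], int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		if (blocks[i]->ops && blocks[i]->ops->enable) {
			ret = blocks[i]->ops->enable(blocks[i]);
			if (ret < 0) {
				fprintf(stderr, "cannot enable block %d:%d\n",
					blocks[i]->type, blocks[i]->index);
				goto unwind;
			}
		}
	}
	return 0;

unwind:
	/* power off the blocks enabled so far, newest first */
	while (--i >= 0)
		if (blocks[i]->ops && blocks[i]->ops->disable)
			blocks[i]->ops->disable(blocks[i]);
	return ret;
}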
block             560 sound/soc/intel/common/sst-firmware.c 	struct sst_mem_block *block;
block             562 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry(block, &dsp->free_block_list, list) {
block             563 sound/soc/intel/common/sst-firmware.c 		if (block->type == ba->type && block->offset == ba->offset)
block             564 sound/soc/intel/common/sst-firmware.c 			return block;
block             575 sound/soc/intel/common/sst-firmware.c 	struct sst_mem_block *block;
block             581 sound/soc/intel/common/sst-firmware.c 		block = find_block(dsp, ba);
block             582 sound/soc/intel/common/sst-firmware.c 		if (!block) {
block             590 sound/soc/intel/common/sst-firmware.c 		list_move_tail(&block->list, &tmp);
block             591 sound/soc/intel/common/sst-firmware.c 		ba->offset += block->size;
block             592 sound/soc/intel/common/sst-firmware.c 		ba->size -= block->size;
block             597 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry(block, &tmp, list) {
block             599 sound/soc/intel/common/sst-firmware.c 		if (block->offset < block_start)
block             600 sound/soc/intel/common/sst-firmware.c 			block_start = block->offset;
block             602 sound/soc/intel/common/sst-firmware.c 		list_add(&block->module_list, block_list);
block             605 sound/soc/intel/common/sst-firmware.c 			block->type, block->index, block->offset);
block             616 sound/soc/intel/common/sst-firmware.c 	struct sst_mem_block *block, *tmp;
block             623 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
block             626 sound/soc/intel/common/sst-firmware.c 		if (block->type != ba->type)
block             629 sound/soc/intel/common/sst-firmware.c 		if (ba->size > block->size)
block             632 sound/soc/intel/common/sst-firmware.c 		ba->offset = block->offset;
block             633 sound/soc/intel/common/sst-firmware.c 		block->bytes_used = ba->size % block->size;
block             634 sound/soc/intel/common/sst-firmware.c 		list_add(&block->module_list, block_list);
block             635 sound/soc/intel/common/sst-firmware.c 		list_move(&block->list, &dsp->used_block_list);
block             637 sound/soc/intel/common/sst-firmware.c 			block->type, block->index, block->offset);
block             642 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
block             645 sound/soc/intel/common/sst-firmware.c 		if (block->type != ba->type)
block             649 sound/soc/intel/common/sst-firmware.c 		if (ba->size > block->size) {
block             652 sound/soc/intel/common/sst-firmware.c 			ba->offset = block->offset;
block             705 sound/soc/intel/common/sst-firmware.c 	struct sst_mem_block *block, *tmp;
block             715 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry_safe(block, tmp, block_list, module_list) {
block             718 sound/soc/intel/common/sst-firmware.c 		if (block->type != ba->type)
block             721 sound/soc/intel/common/sst-firmware.c 		block_end = block->offset + block->size;
block             724 sound/soc/intel/common/sst-firmware.c 		if (ba->offset >= block->offset && end <= block_end)
block             728 sound/soc/intel/common/sst-firmware.c 		if (ba->offset >= block->offset && ba->offset < block_end) {
block             743 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
block             744 sound/soc/intel/common/sst-firmware.c 		block_end = block->offset + block->size;
block             747 sound/soc/intel/common/sst-firmware.c 		if (block->type != ba->type)
block             751 sound/soc/intel/common/sst-firmware.c 		if (ba->offset >= block->offset && end <= block_end) {
block             754 sound/soc/intel/common/sst-firmware.c 			list_move(&block->list, &dsp->used_block_list);
block             755 sound/soc/intel/common/sst-firmware.c 			list_add(&block->module_list, block_list);
block             757 sound/soc/intel/common/sst-firmware.c 				block->type, block->index, block->offset);
block             762 sound/soc/intel/common/sst-firmware.c 		if (ba->offset >= block->offset && ba->offset < block_end) {
block             765 sound/soc/intel/common/sst-firmware.c 			list_move(&block->list, &dsp->used_block_list);
block             766 sound/soc/intel/common/sst-firmware.c 			list_add(&block->module_list, block_list);
block            1022 sound/soc/intel/common/sst-firmware.c 	struct sst_mem_block *block;
block            1024 sound/soc/intel/common/sst-firmware.c 	block = kzalloc(sizeof(*block), GFP_KERNEL);
block            1025 sound/soc/intel/common/sst-firmware.c 	if (block == NULL)
block            1028 sound/soc/intel/common/sst-firmware.c 	block->offset = offset;
block            1029 sound/soc/intel/common/sst-firmware.c 	block->size = size;
block            1030 sound/soc/intel/common/sst-firmware.c 	block->index = index;
block            1031 sound/soc/intel/common/sst-firmware.c 	block->type = type;
block            1032 sound/soc/intel/common/sst-firmware.c 	block->dsp = dsp;
block            1033 sound/soc/intel/common/sst-firmware.c 	block->private = private;
block            1034 sound/soc/intel/common/sst-firmware.c 	block->ops = ops;
block            1037 sound/soc/intel/common/sst-firmware.c 	list_add(&block->list, &dsp->free_block_list);
block            1040 sound/soc/intel/common/sst-firmware.c 	return block;
block            1047 sound/soc/intel/common/sst-firmware.c 	struct sst_mem_block *block, *tmp;
block            1052 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
block            1053 sound/soc/intel/common/sst-firmware.c 		list_del(&block->list);
block            1054 sound/soc/intel/common/sst-firmware.c 		kfree(block);
block            1058 sound/soc/intel/common/sst-firmware.c 	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
block            1059 sound/soc/intel/common/sst-firmware.c 		list_del(&block->list);
block            1060 sound/soc/intel/common/sst-firmware.c 		kfree(block);
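The rest of the sst-firmware.c hits sketch a small allocator: block descriptors created with kzalloc() sit on dsp->free_block_list, requests are matched by type (and, in one path, by exact offset via find_block()), a single block is claimed when the request fits inside it, and larger requests are stitched from consecutive blocks by advancing ba->offset and shrinking ba->size. The fragment below models only the single-block, first-fit case, with an array standing in for the free/used lists; all names and fields are illustrative.

#include <stdbool.h>
#include <stddef.h>

struct demo_block {
	int type;
	unsigned int offset;	/* DSP memory offset this block covers */
	unsigned int size;
	unsigned int bytes_used;
	bool free;		/* stands in for free vs. used list membership */
};

struct demo_alloc_req {
	int type;
	unsigned int size;
	unsigned int offset;	/* filled in on success */
};

static struct demo_block *demo_alloc_block(struct demo_block *blocks,
					   size_t nblocks,
					   struct demo_alloc_req *ba)
{
	size_t i;

	for (i = 0; i < nblocks; i++) {
		struct demo_block *block = &blocks[i];

		if (!block->free || block->type != ba->type)
			continue;
		if (ba->size > block->size)
			continue;	/* request does not fit in one block */

		ba->offset = block->offset;
		block->bytes_used = ba->size;
		block->free = false;	/* i.e. move it to the used list */
		return block;
	}
	return NULL;	/* caller would fall back to the multi-block path */
}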
block              82 sound/soc/intel/haswell/sst-haswell-dsp.c 	struct dma_block_info *block;
block             117 sound/soc/intel/haswell/sst-haswell-dsp.c 	block = (void *)module + sizeof(*module);
block             121 sound/soc/intel/haswell/sst-haswell-dsp.c 		if (le32_to_cpu(block->size) <= 0) {
block             128 sound/soc/intel/haswell/sst-haswell-dsp.c 		switch (le32_to_cpu(block->type)) {
block             131 sound/soc/intel/haswell/sst-haswell-dsp.c 			mod->offset = le32_to_cpu(block->ram_offset) +
block             138 sound/soc/intel/haswell/sst-haswell-dsp.c 			mod->offset = le32_to_cpu(block->ram_offset);
block             143 sound/soc/intel/haswell/sst-haswell-dsp.c 				block->type, count);
block             148 sound/soc/intel/haswell/sst-haswell-dsp.c 		mod->size = le32_to_cpu(block->size);
block             149 sound/soc/intel/haswell/sst-haswell-dsp.c 		mod->data = (void *)block + sizeof(*block);
block             154 sound/soc/intel/haswell/sst-haswell-dsp.c 			count, mod->type, block->size, ram,
block             155 sound/soc/intel/haswell/sst-haswell-dsp.c 			block->ram_offset);
block             165 sound/soc/intel/haswell/sst-haswell-dsp.c 		block = (void *)block + sizeof(*block) +
block             166 sound/soc/intel/haswell/sst-haswell-dsp.c 			le32_to_cpu(block->size);
block             493 sound/soc/intel/haswell/sst-haswell-dsp.c static u32 hsw_block_get_bit(struct sst_mem_block *block)
block             496 sound/soc/intel/haswell/sst-haswell-dsp.c 	struct sst_dsp *sst = block->dsp;
block             504 sound/soc/intel/haswell/sst-haswell-dsp.c 		switch (block->type) {
block             517 sound/soc/intel/haswell/sst-haswell-dsp.c 	bit = 1 << (block->index + shift);
block             523 sound/soc/intel/haswell/sst-haswell-dsp.c static void sst_mem_block_dummy_read(struct sst_mem_block *block)
block             527 sound/soc/intel/haswell/sst-haswell-dsp.c 	struct sst_dsp *sst = block->dsp;
block             529 sound/soc/intel/haswell/sst-haswell-dsp.c 	size = block->size > 4 ? 4 : block->size;
block             530 sound/soc/intel/haswell/sst-haswell-dsp.c 	memcpy_fromio(tmp_buf, sst->addr.lpe + block->offset, size);
block             534 sound/soc/intel/haswell/sst-haswell-dsp.c static int hsw_block_enable(struct sst_mem_block *block)
block             536 sound/soc/intel/haswell/sst-haswell-dsp.c 	struct sst_dsp *sst = block->dsp;
block             539 sound/soc/intel/haswell/sst-haswell-dsp.c 	if (block->users++ > 0)
block             542 sound/soc/intel/haswell/sst-haswell-dsp.c 	dev_dbg(block->dsp->dev, " enabled block %d:%d at offset 0x%x\n",
block             543 sound/soc/intel/haswell/sst-haswell-dsp.c 		block->type, block->index, block->offset);
block             551 sound/soc/intel/haswell/sst-haswell-dsp.c 	bit = hsw_block_get_bit(block);
block             565 sound/soc/intel/haswell/sst-haswell-dsp.c 	sst_mem_block_dummy_read(block);
block             570 sound/soc/intel/haswell/sst-haswell-dsp.c static int hsw_block_disable(struct sst_mem_block *block)
block             572 sound/soc/intel/haswell/sst-haswell-dsp.c 	struct sst_dsp *sst = block->dsp;
block             575 sound/soc/intel/haswell/sst-haswell-dsp.c 	if (--block->users > 0)
block             578 sound/soc/intel/haswell/sst-haswell-dsp.c 	dev_dbg(block->dsp->dev, " disabled block %d:%d at offset 0x%x\n",
block             579 sound/soc/intel/haswell/sst-haswell-dsp.c 		block->type, block->index, block->offset);
block             588 sound/soc/intel/haswell/sst-haswell-dsp.c 	bit = hsw_block_get_bit(block);
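The Haswell lines above suggest how a memory block maps to hardware power gating: hsw_block_get_bit() turns the block's type and index into a single bit (1 << (index + shift)), hsw_block_enable() and hsw_block_disable() refcount block->users so only the first user flips the bit on and only the last flips it off, and sst_mem_block_dummy_read() issues a short readback as part of enabling. The sketch below reproduces the refcount-plus-bit idea only; the shifts, register polarity and register itself are placeholders, not the real SHIM programming.

#include <stdio.h>

enum demo_block_type { DEMO_IRAM, DEMO_DRAM };

struct demo_mem_block {
	enum demo_block_type type;
	int index;
	int users;	/* how many modules currently need this block */
};

static unsigned int demo_power_reg;	/* stand-in for the power-control register */

static unsigned int demo_block_get_bit(const struct demo_mem_block *block)
{
	/* placeholder shifts: IRAM bits from 0, DRAM bits from 16 */
	unsigned int shift = (block->type == DEMO_DRAM) ? 16 : 0;

	return 1u << (block->index + shift);
}

static void demo_block_enable(struct demo_mem_block *block)
{
	if (block->users++ > 0)		/* already powered for another user */
		return;
	demo_power_reg &= ~demo_block_get_bit(block);	/* assume 0 = powered on */
	printf("enabled block %d:%d\n", block->type, block->index);
}

static void demo_block_disable(struct demo_mem_block *block)
{
	if (--block->users > 0)		/* other users still need it */
		return;
	demo_power_reg |= demo_block_get_bit(block);	/* assume 1 = power gated */
	printf("disabled block %d:%d\n", block->type, block->index);
}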
block             264 sound/soc/sof/loader.c 	struct snd_sof_blk_hdr *block;
block             272 sound/soc/sof/loader.c 	block = (struct snd_sof_blk_hdr *)((u8 *)module + sizeof(*module));
block             278 sound/soc/sof/loader.c 		if (remaining < sizeof(*block)) {
block             284 sound/soc/sof/loader.c 		remaining -= sizeof(*block);
block             286 sound/soc/sof/loader.c 		if (block->size == 0) {
block             290 sound/soc/sof/loader.c 				 block->type, block->offset);
block             294 sound/soc/sof/loader.c 		switch (block->type) {
block             301 sound/soc/sof/loader.c 			offset = block->offset;
block             302 sound/soc/sof/loader.c 			bar = snd_sof_dsp_get_bar_index(sdev, block->type);
block             306 sound/soc/sof/loader.c 					block->type);
block             312 sound/soc/sof/loader.c 				block->type, count);
block             318 sound/soc/sof/loader.c 			count, block->type, block->size, offset);
block             321 sound/soc/sof/loader.c 		if (block->size % sizeof(u32)) {
block             323 sound/soc/sof/loader.c 				block->size);
block             327 sound/soc/sof/loader.c 					block + 1, block->size);
block             329 sound/soc/sof/loader.c 		if (remaining < block->size) {
block             335 sound/soc/sof/loader.c 		remaining -= block->size;
block             337 sound/soc/sof/loader.c 		block = (struct snd_sof_blk_hdr *)((u8 *)block + sizeof(*block)
block             338 sound/soc/sof/loader.c 			+ block->size);
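The SOF loader excerpt adds the bounds checking the older loaders lack: before each snd_sof_blk_hdr is consumed the parser verifies that `remaining` still covers the header, then that it covers block->size bytes of payload, flags sizes that are not a multiple of sizeof(u32), and only then advances past header plus payload. A standalone sketch of that accounting, using a simplified header layout rather than the real snd_sof_blk_hdr:

#include <stddef.h>
#include <stdio.h>

struct demo_blk_hdr {
	unsigned int type;
	unsigned int size;	/* payload bytes following this header */
	unsigned int offset;	/* destination offset in DSP memory */
};

static int demo_parse_blocks(const unsigned char *buf, size_t remaining,
			     unsigned int count)
{
	const struct demo_blk_hdr *block = (const struct demo_blk_hdr *)buf;
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* the header itself must fit in what is left of the image */
		if (remaining < sizeof(*block)) {
			fprintf(stderr, "block %u: header past end of image\n", i);
			return -1;
		}
		remaining -= sizeof(*block);

		/* ...and so must the payload it announces */
		if (remaining < block->size) {
			fprintf(stderr, "block %u: 0x%x byte payload past end\n",
				i, block->size);
			return -1;
		}

		if (block->size % sizeof(unsigned int)) {
			fprintf(stderr, "block %u: size 0x%x not word aligned\n",
				i, block->size);
			return -1;
		}

		/* payload starts right after the header, as in `block + 1` */
		printf("block %u: type %u, 0x%x bytes -> offset 0x%x\n",
		       i, block->type, block->size, block->offset);

		remaining -= block->size;
		block = (const struct demo_blk_hdr *)
			((const unsigned char *)block + sizeof(*block) + block->size);
	}
	return 0;
}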
block             103 sound/synth/emux/emux_synth.c 			vp->block = vp->zone->sample->block;
block             105 sound/synth/emux/emux_synth.c 			vp->block = NULL;
block             500 sound/synth/emux/emux_synth.c 	vp->block = NULL;
block              34 sound/synth/util_mem.c 	INIT_LIST_HEAD(&hdr->block);
block              49 sound/synth/util_mem.c 	while ((p = hdr->block.next) != &hdr->block) {
block              78 sound/synth/util_mem.c 	list_for_each(p, &hdr->block) {
block             107 sound/synth/util_mem.c 	if (prev == &hdr->block)
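Finally, util_mem.c shows the plain intrusive-list version of block management: a pool header whose hdr->block list is set up with INIT_LIST_HEAD(), walked with list_for_each(), and drained by popping hdr->block.next until it points back at the head. The toy list below imitates that shape without <linux/list.h>; the demo_* types are stand-ins for the pool header and block structures used there.

#include <stddef.h>
#include <stdlib.h>

struct demo_list_head {
	struct demo_list_head *next, *prev;
};

struct demo_mem_hdr {
	struct demo_list_head block;	/* head of the per-pool block list */
};

struct demo_mem_block {
	struct demo_list_head list;
	size_t size;
};

static void demo_init_pool(struct demo_mem_hdr *hdr)
{
	hdr->block.next = hdr->block.prev = &hdr->block;	/* empty list */
}

static void demo_add_block(struct demo_mem_hdr *hdr, struct demo_mem_block *blk)
{
	/* insert at the tail, i.e. just before the head node */
	blk->list.next = &hdr->block;
	blk->list.prev = hdr->block.prev;
	hdr->block.prev->next = &blk->list;
	hdr->block.prev = &blk->list;
}

static void demo_free_pool(struct demo_mem_hdr *hdr)
{
	struct demo_list_head *p;

	/* mirror of "while ((p = hdr->block.next) != &hdr->block)" */
	while ((p = hdr->block.next) != &hdr->block) {
		struct demo_mem_block *blk =
			(struct demo_mem_block *)((char *)p -
				offsetof(struct demo_mem_block, list));

		p->prev->next = p->next;	/* unlink */
		p->next->prev = p->prev;
		free(blk);
	}
}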